Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/test_extract_array.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/test_extract_array.py +18 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/conftest.py +242 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py +289 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py +758 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py +429 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py +342 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_clipboard.py +423 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_common.py +650 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_compression.py +378 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_feather.py +252 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_fsspec.py +345 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gbq.py +14 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gcs.py +219 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_html.py +1657 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_http_headers.py +172 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_orc.py +436 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_parquet.py +1424 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_pickle.py +652 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_s3.py +43 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_spss.py +164 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_sql.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_stata.py +2381 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_boxplot_method.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_hist_method.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_misc.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_series.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_style.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/common.py +563 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py +56 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (198 Bytes)
llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/test_extract_array.cpython-310.pyc
ADDED
Binary file (744 Bytes)
llmeval-env/lib/python3.10/site-packages/pandas/tests/construction/test_extract_array.py
ADDED
@@ -0,0 +1,18 @@
+from pandas import Index
+import pandas._testing as tm
+from pandas.core.construction import extract_array
+
+
+def test_extract_array_rangeindex():
+    ri = Index(range(5))
+
+    expected = ri._values
+    res = extract_array(ri, extract_numpy=True, extract_range=True)
+    tm.assert_numpy_array_equal(res, expected)
+    res = extract_array(ri, extract_numpy=False, extract_range=True)
+    tm.assert_numpy_array_equal(res, expected)
+
+    res = extract_array(ri, extract_numpy=True, extract_range=False)
+    tm.assert_index_equal(res, ri)
+    res = extract_array(ri, extract_numpy=False, extract_range=False)
+    tm.assert_index_equal(res, ri)
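For context, a minimal sketch of what the extract_array flags do for a RangeIndex, inferred from the assertions in the test above (the print calls are illustrative only):

import pandas as pd
from pandas.core.construction import extract_array

ri = pd.Index(range(5))  # a RangeIndex

# extract_range=True materializes the RangeIndex into its underlying ndarray
print(type(extract_array(ri, extract_numpy=True, extract_range=True)))   # numpy.ndarray

# extract_range=False hands the RangeIndex back unchanged
print(type(extract_array(ri, extract_numpy=True, extract_range=False)))  # RangeIndex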
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (326 Bytes)
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc
ADDED
Binary file (5.13 kB)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/conftest.py
ADDED
@@ -0,0 +1,242 @@
+import shlex
+import subprocess
+import time
+import uuid
+
+import pytest
+
+from pandas.compat import (
+    is_ci_environment,
+    is_platform_arm,
+    is_platform_mac,
+    is_platform_windows,
+)
+import pandas.util._test_decorators as td
+
+import pandas.io.common as icom
+from pandas.io.parsers import read_csv
+
+
+@pytest.fixture
+def compression_to_extension():
+    return {value: key for key, value in icom.extension_to_compression.items()}
+
+
+@pytest.fixture
+def tips_file(datapath):
+    """Path to the tips dataset"""
+    return datapath("io", "data", "csv", "tips.csv")
+
+
+@pytest.fixture
+def jsonl_file(datapath):
+    """Path to a JSONL dataset"""
+    return datapath("io", "parser", "data", "items.jsonl")
+
+
+@pytest.fixture
+def salaries_table(datapath):
+    """DataFrame with the salaries dataset"""
+    return read_csv(datapath("io", "parser", "data", "salaries.csv"), sep="\t")
+
+
+@pytest.fixture
+def feather_file(datapath):
+    return datapath("io", "data", "feather", "feather-0_3_1.feather")
+
+
+@pytest.fixture
+def xml_file(datapath):
+    return datapath("io", "data", "xml", "books.xml")
+
+
+@pytest.fixture
+def s3_base(worker_id, monkeypatch):
+    """
+    Fixture for mocking S3 interaction.
+
+    Sets up moto server in separate process locally
+    Return url for motoserver/moto CI service
+    """
+    pytest.importorskip("s3fs")
+    pytest.importorskip("boto3")
+
+    # temporary workaround as moto fails for botocore >= 1.11 otherwise,
+    # see https://github.com/spulec/moto/issues/1924 & 1952
+    monkeypatch.setenv("AWS_ACCESS_KEY_ID", "foobar_key")
+    monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "foobar_secret")
+    if is_ci_environment():
+        if is_platform_arm() or is_platform_mac() or is_platform_windows():
+            # NOT RUN on Windows/macOS/ARM, only Ubuntu
+            # - subprocess in CI can cause timeouts
+            # - GitHub Actions do not support
+            #   container services for the above OSs
+            # - CircleCI will probably hit the Docker rate pull limit
+            pytest.skip(
+                "S3 tests do not have a corresponding service in "
+                "Windows, macOS or ARM platforms"
+            )
+        else:
+            # set in .github/workflows/unit-tests.yml
+            yield "http://localhost:5000"
+    else:
+        requests = pytest.importorskip("requests")
+        pytest.importorskip("moto")
+        pytest.importorskip("flask")  # server mode needs flask too
+
+        # Launching moto in server mode, i.e., as a separate process
+        # with an S3 endpoint on localhost
+
+        worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw")
+        endpoint_port = f"555{worker_id}"
+        endpoint_uri = f"http://127.0.0.1:{endpoint_port}/"
+
+        # pipe to null to avoid logging in terminal
+        with subprocess.Popen(
+            shlex.split(f"moto_server s3 -p {endpoint_port}"),
+            stdout=subprocess.DEVNULL,
+            stderr=subprocess.DEVNULL,
+        ) as proc:
+            timeout = 5
+            while timeout > 0:
+                try:
+                    # OK to go once server is accepting connections
+                    r = requests.get(endpoint_uri)
+                    if r.ok:
+                        break
+                except Exception:
+                    pass
+                timeout -= 0.1
+                time.sleep(0.1)
+            yield endpoint_uri
+
+            proc.terminate()
+
+
+@pytest.fixture
+def s3so(s3_base):
+    return {"client_kwargs": {"endpoint_url": s3_base}}
+
+
+@pytest.fixture
+def s3_resource(s3_base):
+    import boto3
+
+    s3 = boto3.resource("s3", endpoint_url=s3_base)
+    return s3
+
+
+@pytest.fixture
+def s3_public_bucket(s3_resource):
+    bucket = s3_resource.Bucket(f"pandas-test-{uuid.uuid4()}")
+    bucket.create()
+    yield bucket
+    bucket.objects.delete()
+    bucket.delete()
+
+
+@pytest.fixture
+def s3_public_bucket_with_data(
+    s3_public_bucket, tips_file, jsonl_file, feather_file, xml_file
+):
+    """
+    The following datasets
+    are loaded.
+
+    - tips.csv
+    - tips.csv.gz
+    - tips.csv.bz2
+    - items.jsonl
+    """
+    test_s3_files = [
+        ("tips#1.csv", tips_file),
+        ("tips.csv", tips_file),
+        ("tips.csv.gz", tips_file + ".gz"),
+        ("tips.csv.bz2", tips_file + ".bz2"),
+        ("items.jsonl", jsonl_file),
+        ("simple_dataset.feather", feather_file),
+        ("books.xml", xml_file),
+    ]
+    for s3_key, file_name in test_s3_files:
+        with open(file_name, "rb") as f:
+            s3_public_bucket.put_object(Key=s3_key, Body=f)
+    return s3_public_bucket
+
+
+@pytest.fixture
+def s3_private_bucket(s3_resource):
+    bucket = s3_resource.Bucket(f"cant_get_it-{uuid.uuid4()}")
+    bucket.create(ACL="private")
+    yield bucket
+    bucket.objects.delete()
+    bucket.delete()
+
+
+@pytest.fixture
+def s3_private_bucket_with_data(
+    s3_private_bucket, tips_file, jsonl_file, feather_file, xml_file
+):
+    """
+    The following datasets
+    are loaded.
+
+    - tips.csv
+    - tips.csv.gz
+    - tips.csv.bz2
+    - items.jsonl
+    """
+    test_s3_files = [
+        ("tips#1.csv", tips_file),
+        ("tips.csv", tips_file),
+        ("tips.csv.gz", tips_file + ".gz"),
+        ("tips.csv.bz2", tips_file + ".bz2"),
+        ("items.jsonl", jsonl_file),
+        ("simple_dataset.feather", feather_file),
+        ("books.xml", xml_file),
+    ]
+    for s3_key, file_name in test_s3_files:
+        with open(file_name, "rb") as f:
+            s3_private_bucket.put_object(Key=s3_key, Body=f)
+    return s3_private_bucket
+
+
+_compression_formats_params = [
+    (".no_compress", None),
+    ("", None),
+    (".gz", "gzip"),
+    (".GZ", "gzip"),
+    (".bz2", "bz2"),
+    (".BZ2", "bz2"),
+    (".zip", "zip"),
+    (".ZIP", "zip"),
+    (".xz", "xz"),
+    (".XZ", "xz"),
+    pytest.param((".zst", "zstd"), marks=td.skip_if_no("zstandard")),
+    pytest.param((".ZST", "zstd"), marks=td.skip_if_no("zstandard")),
+]
+
+
+@pytest.fixture(params=_compression_formats_params[1:])
+def compression_format(request):
+    return request.param
+
+
+@pytest.fixture(params=_compression_formats_params)
+def compression_ext(request):
+    return request.param[0]
+
+
+@pytest.fixture(
+    params=[
+        "python",
+        pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")),
+    ]
+)
+def string_storage(request):
+    """
+    Parametrized fixture for pd.options.mode.string_storage.
+
+    * 'python'
+    * 'pyarrow'
+    """
+    return request.param
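For context, a hypothetical test showing how these fixtures compose: s3_public_bucket_with_data uploads the sample files into a moto-backed bucket, and s3so supplies the storage_options that point read_csv at the mock endpoint. The test name and assertion below are illustrative, not part of this commit:

import pandas as pd

def test_read_csv_from_mocked_s3(s3_public_bucket_with_data, s3so):
    # storage_options routes s3fs to the local moto server, never real AWS
    df = pd.read_csv(
        f"s3://{s3_public_bucket_with_data.name}/tips.csv",
        storage_options=s3so,
    )
    assert not df.empty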
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/__init__.py
ADDED
File without changes
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_css.py
ADDED
@@ -0,0 +1,289 @@
+import pytest
+
+from pandas.errors import CSSWarning
+
+import pandas._testing as tm
+
+from pandas.io.formats.css import CSSResolver
+
+
+def assert_resolves(css, props, inherited=None):
+    resolve = CSSResolver()
+    actual = resolve(css, inherited=inherited)
+    assert props == actual
+
+
+def assert_same_resolution(css1, css2, inherited=None):
+    resolve = CSSResolver()
+    resolved1 = resolve(css1, inherited=inherited)
+    resolved2 = resolve(css2, inherited=inherited)
+    assert resolved1 == resolved2
+
+
+@pytest.mark.parametrize(
+    "name,norm,abnorm",
+    [
+        (
+            "whitespace",
+            "hello: world; foo: bar",
+            " \t hello \t :\n world \n ; \n foo: \tbar\n\n",
+        ),
+        ("case", "hello: world; foo: bar", "Hello: WORLD; foO: bar"),
+        ("empty-decl", "hello: world; foo: bar", "; hello: world;; foo: bar;\n; ;"),
+        ("empty-list", "", ";"),
+    ],
+)
+def test_css_parse_normalisation(name, norm, abnorm):
+    assert_same_resolution(norm, abnorm)
+
+
+@pytest.mark.parametrize(
+    "invalid_css,remainder",
+    [
+        # No colon
+        ("hello-world", ""),
+        ("border-style: solid; hello-world", "border-style: solid"),
+        (
+            "border-style: solid; hello-world; font-weight: bold",
+            "border-style: solid; font-weight: bold",
+        ),
+        # Unclosed string fail
+        # Invalid size
+        ("font-size: blah", "font-size: 1em"),
+        ("font-size: 1a2b", "font-size: 1em"),
+        ("font-size: 1e5pt", "font-size: 1em"),
+        ("font-size: 1+6pt", "font-size: 1em"),
+        ("font-size: 1unknownunit", "font-size: 1em"),
+        ("font-size: 10", "font-size: 1em"),
+        ("font-size: 10 pt", "font-size: 1em"),
+        # Too many args
+        ("border-top: 1pt solid red green", "border-top: 1pt solid green"),
+    ],
+)
+def test_css_parse_invalid(invalid_css, remainder):
+    with tm.assert_produces_warning(CSSWarning):
+        assert_same_resolution(invalid_css, remainder)
+
+
+@pytest.mark.parametrize(
+    "shorthand,expansions",
+    [
+        ("margin", ["margin-top", "margin-right", "margin-bottom", "margin-left"]),
+        ("padding", ["padding-top", "padding-right", "padding-bottom", "padding-left"]),
+        (
+            "border-width",
+            [
+                "border-top-width",
+                "border-right-width",
+                "border-bottom-width",
+                "border-left-width",
+            ],
+        ),
+        (
+            "border-color",
+            [
+                "border-top-color",
+                "border-right-color",
+                "border-bottom-color",
+                "border-left-color",
+            ],
+        ),
+        (
+            "border-style",
+            [
+                "border-top-style",
+                "border-right-style",
+                "border-bottom-style",
+                "border-left-style",
+            ],
+        ),
+    ],
+)
+def test_css_side_shorthands(shorthand, expansions):
+    top, right, bottom, left = expansions
+
+    assert_resolves(
+        f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"}
+    )
+
+    assert_resolves(
+        f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"}
+    )
+
+    assert_resolves(
+        f"{shorthand}: 1pt 4pt 2pt",
+        {top: "1pt", right: "4pt", bottom: "2pt", left: "4pt"},
+    )
+
+    assert_resolves(
+        f"{shorthand}: 1pt 4pt 2pt 0pt",
+        {top: "1pt", right: "4pt", bottom: "2pt", left: "0pt"},
+    )
+
+    with tm.assert_produces_warning(CSSWarning):
+        assert_resolves(f"{shorthand}: 1pt 1pt 1pt 1pt 1pt", {})
+
+
+@pytest.mark.parametrize(
+    "shorthand,sides",
+    [
+        ("border-top", ["top"]),
+        ("border-right", ["right"]),
+        ("border-bottom", ["bottom"]),
+        ("border-left", ["left"]),
+        ("border", ["top", "right", "bottom", "left"]),
+    ],
+)
+def test_css_border_shorthand_sides(shorthand, sides):
+    def create_border_dict(sides, color=None, style=None, width=None):
+        resolved = {}
+        for side in sides:
+            if color:
+                resolved[f"border-{side}-color"] = color
+            if style:
+                resolved[f"border-{side}-style"] = style
+            if width:
+                resolved[f"border-{side}-width"] = width
+        return resolved
+
+    assert_resolves(
+        f"{shorthand}: 1pt red solid", create_border_dict(sides, "red", "solid", "1pt")
+    )
+
+
+@pytest.mark.parametrize(
+    "prop, expected",
+    [
+        ("1pt red solid", ("red", "solid", "1pt")),
+        ("red 1pt solid", ("red", "solid", "1pt")),
+        ("red solid 1pt", ("red", "solid", "1pt")),
+        ("solid 1pt red", ("red", "solid", "1pt")),
+        ("red solid", ("red", "solid", "1.500000pt")),
+        # Note: color=black is not CSS conforming
+        # (See https://drafts.csswg.org/css-backgrounds/#border-shorthands)
+        ("1pt solid", ("black", "solid", "1pt")),
+        ("1pt red", ("red", "none", "1pt")),
+        ("red", ("red", "none", "1.500000pt")),
+        ("1pt", ("black", "none", "1pt")),
+        ("solid", ("black", "solid", "1.500000pt")),
+        # Sizes
+        ("1em", ("black", "none", "12pt")),
+    ],
+)
+def test_css_border_shorthands(prop, expected):
+    color, style, width = expected
+
+    assert_resolves(
+        f"border-left: {prop}",
+        {
+            "border-left-color": color,
+            "border-left-style": style,
+            "border-left-width": width,
+        },
+    )
+
+
+@pytest.mark.parametrize(
+    "style,inherited,equiv",
+    [
+        ("margin: 1px; margin: 2px", "", "margin: 2px"),
+        ("margin: 1px", "margin: 2px", "margin: 1px"),
+        ("margin: 1px; margin: inherit", "margin: 2px", "margin: 2px"),
+        (
+            "margin: 1px; margin-top: 2px",
+            "",
+            "margin-left: 1px; margin-right: 1px; "
+            "margin-bottom: 1px; margin-top: 2px",
+        ),
+        ("margin-top: 2px", "margin: 1px", "margin: 1px; margin-top: 2px"),
+        ("margin: 1px", "margin-top: 2px", "margin: 1px"),
+        (
+            "margin: 1px; margin-top: inherit",
+            "margin: 2px",
+            "margin: 1px; margin-top: 2px",
+        ),
+    ],
+)
+def test_css_precedence(style, inherited, equiv):
+    resolve = CSSResolver()
+    inherited_props = resolve(inherited)
+    style_props = resolve(style, inherited=inherited_props)
+    equiv_props = resolve(equiv)
+    assert style_props == equiv_props
+
+
+@pytest.mark.parametrize(
+    "style,equiv",
+    [
+        (
+            "margin: 1px; margin-top: inherit",
+            "margin-bottom: 1px; margin-right: 1px; margin-left: 1px",
+        ),
+        ("margin-top: inherit", ""),
+        ("margin-top: initial", ""),
+    ],
+)
+def test_css_none_absent(style, equiv):
+    assert_same_resolution(style, equiv)
+
+
+@pytest.mark.parametrize(
+    "size,resolved",
+    [
+        ("xx-small", "6pt"),
+        ("x-small", f"{7.5:f}pt"),
+        ("small", f"{9.6:f}pt"),
+        ("medium", "12pt"),
+        ("large", f"{13.5:f}pt"),
+        ("x-large", "18pt"),
+        ("xx-large", "24pt"),
+        ("8px", "6pt"),
+        ("1.25pc", "15pt"),
+        (".25in", "18pt"),
+        ("02.54cm", "72pt"),
+        ("25.4mm", "72pt"),
+        ("101.6q", "72pt"),
+        ("101.6q", "72pt"),
+    ],
+)
+@pytest.mark.parametrize("relative_to", [None, "16pt"])  # invariant to inherited size
+def test_css_absolute_font_size(size, relative_to, resolved):
+    if relative_to is None:
+        inherited = None
+    else:
+        inherited = {"font-size": relative_to}
+    assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
+
+
+@pytest.mark.parametrize(
+    "size,relative_to,resolved",
+    [
+        ("1em", None, "12pt"),
+        ("1.0em", None, "12pt"),
+        ("1.25em", None, "15pt"),
+        ("1em", "16pt", "16pt"),
+        ("1.0em", "16pt", "16pt"),
+        ("1.25em", "16pt", "20pt"),
+        ("1rem", "16pt", "12pt"),
+        ("1.0rem", "16pt", "12pt"),
+        ("1.25rem", "16pt", "15pt"),
+        ("100%", None, "12pt"),
+        ("125%", None, "15pt"),
+        ("100%", "16pt", "16pt"),
+        ("125%", "16pt", "20pt"),
+        ("2ex", None, "12pt"),
+        ("2.0ex", None, "12pt"),
+        ("2.50ex", None, "15pt"),
+        ("inherit", "16pt", "16pt"),
+        ("smaller", None, "10pt"),
+        ("smaller", "18pt", "15pt"),
+        ("larger", None, f"{14.4:f}pt"),
+        ("larger", "15pt", "18pt"),
+    ],
+)
+def test_css_relative_font_size(size, relative_to, resolved):
+    if relative_to is None:
+        inherited = None
+    else:
+        inherited = {"font-size": relative_to}
+    assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
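For context, CSSResolver (exercised throughout the file above) is a callable that expands CSS declaration strings into atomic properties. Per test_css_border_shorthand_sides, a border shorthand fans out into per-side color/style/width keys:

from pandas.io.formats.css import CSSResolver

resolve = CSSResolver()
# Per the tests above, this resolves to:
# {"border-top-color": "red", "border-top-style": "solid", "border-top-width": "1pt"}
print(resolve("border-top: 1pt solid red"))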
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_csv.py
ADDED
@@ -0,0 +1,758 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import io
|
2 |
+
import os
|
3 |
+
import sys
|
4 |
+
from zipfile import ZipFile
|
5 |
+
|
6 |
+
from _csv import Error
|
7 |
+
import numpy as np
|
8 |
+
import pytest
|
9 |
+
|
10 |
+
import pandas as pd
|
11 |
+
from pandas import (
|
12 |
+
DataFrame,
|
13 |
+
Index,
|
14 |
+
compat,
|
15 |
+
)
|
16 |
+
import pandas._testing as tm
|
17 |
+
|
18 |
+
|
19 |
+
class TestToCSV:
|
20 |
+
def test_to_csv_with_single_column(self):
|
21 |
+
# see gh-18676, https://bugs.python.org/issue32255
|
22 |
+
#
|
23 |
+
# Python's CSV library adds an extraneous '""'
|
24 |
+
# before the newline when the NaN-value is in
|
25 |
+
# the first row. Otherwise, only the newline
|
26 |
+
# character is added. This behavior is inconsistent
|
27 |
+
# and was patched in https://bugs.python.org/pull_request4672.
|
28 |
+
df1 = DataFrame([None, 1])
|
29 |
+
expected1 = """\
|
30 |
+
""
|
31 |
+
1.0
|
32 |
+
"""
|
33 |
+
with tm.ensure_clean("test.csv") as path:
|
34 |
+
df1.to_csv(path, header=None, index=None)
|
35 |
+
with open(path, encoding="utf-8") as f:
|
36 |
+
assert f.read() == expected1
|
37 |
+
|
38 |
+
df2 = DataFrame([1, None])
|
39 |
+
expected2 = """\
|
40 |
+
1.0
|
41 |
+
""
|
42 |
+
"""
|
43 |
+
with tm.ensure_clean("test.csv") as path:
|
44 |
+
df2.to_csv(path, header=None, index=None)
|
45 |
+
with open(path, encoding="utf-8") as f:
|
46 |
+
assert f.read() == expected2
|
47 |
+
|
48 |
+
def test_to_csv_default_encoding(self):
|
49 |
+
# GH17097
|
50 |
+
df = DataFrame({"col": ["AAAAA", "ÄÄÄÄÄ", "ßßßßß", "聞聞聞聞聞"]})
|
51 |
+
|
52 |
+
with tm.ensure_clean("test.csv") as path:
|
53 |
+
# the default to_csv encoding is uft-8.
|
54 |
+
df.to_csv(path)
|
55 |
+
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
|
56 |
+
|
57 |
+
def test_to_csv_quotechar(self):
|
58 |
+
df = DataFrame({"col": [1, 2]})
|
59 |
+
expected = """\
|
60 |
+
"","col"
|
61 |
+
"0","1"
|
62 |
+
"1","2"
|
63 |
+
"""
|
64 |
+
|
65 |
+
with tm.ensure_clean("test.csv") as path:
|
66 |
+
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
|
67 |
+
with open(path, encoding="utf-8") as f:
|
68 |
+
assert f.read() == expected
|
69 |
+
|
70 |
+
expected = """\
|
71 |
+
$$,$col$
|
72 |
+
$0$,$1$
|
73 |
+
$1$,$2$
|
74 |
+
"""
|
75 |
+
|
76 |
+
with tm.ensure_clean("test.csv") as path:
|
77 |
+
df.to_csv(path, quoting=1, quotechar="$")
|
78 |
+
with open(path, encoding="utf-8") as f:
|
79 |
+
assert f.read() == expected
|
80 |
+
|
81 |
+
with tm.ensure_clean("test.csv") as path:
|
82 |
+
with pytest.raises(TypeError, match="quotechar"):
|
83 |
+
df.to_csv(path, quoting=1, quotechar=None)
|
84 |
+
|
85 |
+
def test_to_csv_doublequote(self):
|
86 |
+
df = DataFrame({"col": ['a"a', '"bb"']})
|
87 |
+
expected = '''\
|
88 |
+
"","col"
|
89 |
+
"0","a""a"
|
90 |
+
"1","""bb"""
|
91 |
+
'''
|
92 |
+
|
93 |
+
with tm.ensure_clean("test.csv") as path:
|
94 |
+
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
|
95 |
+
with open(path, encoding="utf-8") as f:
|
96 |
+
assert f.read() == expected
|
97 |
+
|
98 |
+
with tm.ensure_clean("test.csv") as path:
|
99 |
+
with pytest.raises(Error, match="escapechar"):
|
100 |
+
df.to_csv(path, doublequote=False) # no escapechar set
|
101 |
+
|
102 |
+
def test_to_csv_escapechar(self):
|
103 |
+
df = DataFrame({"col": ['a"a', '"bb"']})
|
104 |
+
expected = """\
|
105 |
+
"","col"
|
106 |
+
"0","a\\"a"
|
107 |
+
"1","\\"bb\\""
|
108 |
+
"""
|
109 |
+
|
110 |
+
with tm.ensure_clean("test.csv") as path: # QUOTE_ALL
|
111 |
+
df.to_csv(path, quoting=1, doublequote=False, escapechar="\\")
|
112 |
+
with open(path, encoding="utf-8") as f:
|
113 |
+
assert f.read() == expected
|
114 |
+
|
115 |
+
df = DataFrame({"col": ["a,a", ",bb,"]})
|
116 |
+
expected = """\
|
117 |
+
,col
|
118 |
+
0,a\\,a
|
119 |
+
1,\\,bb\\,
|
120 |
+
"""
|
121 |
+
|
122 |
+
with tm.ensure_clean("test.csv") as path:
|
123 |
+
df.to_csv(path, quoting=3, escapechar="\\") # QUOTE_NONE
|
124 |
+
with open(path, encoding="utf-8") as f:
|
125 |
+
assert f.read() == expected
|
126 |
+
|
127 |
+
def test_csv_to_string(self):
|
128 |
+
df = DataFrame({"col": [1, 2]})
|
129 |
+
expected_rows = [",col", "0,1", "1,2"]
|
130 |
+
expected = tm.convert_rows_list_to_csv_str(expected_rows)
|
131 |
+
assert df.to_csv() == expected
|
132 |
+
|
133 |
+
def test_to_csv_decimal(self):
|
134 |
+
# see gh-781
|
135 |
+
df = DataFrame({"col1": [1], "col2": ["a"], "col3": [10.1]})
|
136 |
+
|
137 |
+
expected_rows = [",col1,col2,col3", "0,1,a,10.1"]
|
138 |
+
expected_default = tm.convert_rows_list_to_csv_str(expected_rows)
|
139 |
+
assert df.to_csv() == expected_default
|
140 |
+
|
141 |
+
expected_rows = [";col1;col2;col3", "0;1;a;10,1"]
|
142 |
+
expected_european_excel = tm.convert_rows_list_to_csv_str(expected_rows)
|
143 |
+
assert df.to_csv(decimal=",", sep=";") == expected_european_excel
|
144 |
+
|
145 |
+
expected_rows = [",col1,col2,col3", "0,1,a,10.10"]
|
146 |
+
expected_float_format_default = tm.convert_rows_list_to_csv_str(expected_rows)
|
147 |
+
assert df.to_csv(float_format="%.2f") == expected_float_format_default
|
148 |
+
|
149 |
+
expected_rows = [";col1;col2;col3", "0;1;a;10,10"]
|
150 |
+
expected_float_format = tm.convert_rows_list_to_csv_str(expected_rows)
|
151 |
+
assert (
|
152 |
+
df.to_csv(decimal=",", sep=";", float_format="%.2f")
|
153 |
+
== expected_float_format
|
154 |
+
)
|
155 |
+
|
156 |
+
# see gh-11553: testing if decimal is taken into account for '0.0'
|
157 |
+
df = DataFrame({"a": [0, 1.1], "b": [2.2, 3.3], "c": 1})
|
158 |
+
|
159 |
+
expected_rows = ["a,b,c", "0^0,2^2,1", "1^1,3^3,1"]
|
160 |
+
expected = tm.convert_rows_list_to_csv_str(expected_rows)
|
161 |
+
assert df.to_csv(index=False, decimal="^") == expected
|
162 |
+
|
163 |
+
# same but for an index
|
164 |
+
assert df.set_index("a").to_csv(decimal="^") == expected
|
165 |
+
|
166 |
+
# same for a multi-index
|
167 |
+
assert df.set_index(["a", "b"]).to_csv(decimal="^") == expected
|
168 |
+
|
169 |
+
def test_to_csv_float_format(self):
|
170 |
+
# testing if float_format is taken into account for the index
|
171 |
+
# GH 11553
|
172 |
+
df = DataFrame({"a": [0, 1], "b": [2.2, 3.3], "c": 1})
|
173 |
+
|
174 |
+
expected_rows = ["a,b,c", "0,2.20,1", "1,3.30,1"]
|
175 |
+
expected = tm.convert_rows_list_to_csv_str(expected_rows)
|
176 |
+
assert df.set_index("a").to_csv(float_format="%.2f") == expected
|
177 |
+
|
178 |
+
# same for a multi-index
|
179 |
+
assert df.set_index(["a", "b"]).to_csv(float_format="%.2f") == expected
|
180 |
+
|
181 |
+
def test_to_csv_na_rep(self):
|
182 |
+
# see gh-11553
|
183 |
+
#
|
184 |
+
# Testing if NaN values are correctly represented in the index.
|
185 |
+
df = DataFrame({"a": [0, np.nan], "b": [0, 1], "c": [2, 3]})
|
186 |
+
expected_rows = ["a,b,c", "0.0,0,2", "_,1,3"]
|
187 |
+
expected = tm.convert_rows_list_to_csv_str(expected_rows)
|
188 |
+
|
189 |
+
assert df.set_index("a").to_csv(na_rep="_") == expected
|
190 |
+
assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected
|
191 |
+
|
192 |
+
# now with an index containing only NaNs
|
193 |
+
df = DataFrame({"a": np.nan, "b": [0, 1], "c": [2, 3]})
|
194 |
+
expected_rows = ["a,b,c", "_,0,2", "_,1,3"]
|
195 |
+
expected = tm.convert_rows_list_to_csv_str(expected_rows)
|
196 |
+
|
197 |
+
assert df.set_index("a").to_csv(na_rep="_") == expected
|
198 |
+
assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected
|
199 |
+
|
200 |
+
# check if na_rep parameter does not break anything when no NaN
|
201 |
+
df = DataFrame({"a": 0, "b": [0, 1], "c": [2, 3]})
|
202 |
+
expected_rows = ["a,b,c", "0,0,2", "0,1,3"]
|
203 |
+
expected = tm.convert_rows_list_to_csv_str(expected_rows)
|
204 |
+
|
205 |
+
assert df.set_index("a").to_csv(na_rep="_") == expected
|
206 |
+
assert df.set_index(["a", "b"]).to_csv(na_rep="_") == expected
|
207 |
+
|
208 |
+
csv = pd.Series(["a", pd.NA, "c"]).to_csv(na_rep="ZZZZZ")
|
209 |
+
expected = tm.convert_rows_list_to_csv_str([",0", "0,a", "1,ZZZZZ", "2,c"])
|
210 |
+
assert expected == csv
|
211 |
+
|
212 |
+
def test_to_csv_na_rep_nullable_string(self, nullable_string_dtype):
|
213 |
+
# GH 29975
|
214 |
+
# Make sure full na_rep shows up when a dtype is provided
|
215 |
+
expected = tm.convert_rows_list_to_csv_str([",0", "0,a", "1,ZZZZZ", "2,c"])
|
216 |
+
csv = pd.Series(["a", pd.NA, "c"], dtype=nullable_string_dtype).to_csv(
|
217 |
+
na_rep="ZZZZZ"
|
218 |
+
)
|
219 |
+
assert expected == csv
|
220 |
+
|
221 |
+
def test_to_csv_date_format(self):
|
222 |
+
# GH 10209
|
223 |
+
df_sec = DataFrame({"A": pd.date_range("20130101", periods=5, freq="s")})
|
224 |
+
df_day = DataFrame({"A": pd.date_range("20130101", periods=5, freq="d")})
|
225 |
+
|
226 |
+
expected_rows = [
|
227 |
+
",A",
|
228 |
+
"0,2013-01-01 00:00:00",
|
229 |
+
"1,2013-01-01 00:00:01",
|
230 |
+
"2,2013-01-01 00:00:02",
|
231 |
+
"3,2013-01-01 00:00:03",
|
232 |
+
"4,2013-01-01 00:00:04",
|
233 |
+
]
|
234 |
+
expected_default_sec = tm.convert_rows_list_to_csv_str(expected_rows)
|
235 |
+
assert df_sec.to_csv() == expected_default_sec
|
236 |
+
|
237 |
+
expected_rows = [
|
238 |
+
",A",
|
239 |
+
"0,2013-01-01 00:00:00",
|
240 |
+
"1,2013-01-02 00:00:00",
|
241 |
+
"2,2013-01-03 00:00:00",
|
242 |
+
"3,2013-01-04 00:00:00",
|
243 |
+
"4,2013-01-05 00:00:00",
|
244 |
+
]
|
245 |
+
expected_ymdhms_day = tm.convert_rows_list_to_csv_str(expected_rows)
|
246 |
+
assert df_day.to_csv(date_format="%Y-%m-%d %H:%M:%S") == expected_ymdhms_day
|
247 |
+
|
248 |
+
expected_rows = [
|
249 |
+
",A",
|
250 |
+
"0,2013-01-01",
|
251 |
+
"1,2013-01-01",
|
252 |
+
"2,2013-01-01",
|
253 |
+
"3,2013-01-01",
|
254 |
+
"4,2013-01-01",
|
255 |
+
]
|
256 |
+
expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
|
257 |
+
assert df_sec.to_csv(date_format="%Y-%m-%d") == expected_ymd_sec
|
258 |
+
|
259 |
+
expected_rows = [
|
260 |
+
",A",
|
261 |
+
"0,2013-01-01",
|
262 |
+
"1,2013-01-02",
|
263 |
+
"2,2013-01-03",
|
264 |
+
"3,2013-01-04",
|
265 |
+
"4,2013-01-05",
|
266 |
+
]
|
267 |
+
expected_default_day = tm.convert_rows_list_to_csv_str(expected_rows)
|
268 |
+
assert df_day.to_csv() == expected_default_day
|
269 |
+
assert df_day.to_csv(date_format="%Y-%m-%d") == expected_default_day
|
270 |
+
|
271 |
+
# see gh-7791
|
272 |
+
#
|
273 |
+
# Testing if date_format parameter is taken into account
|
274 |
+
# for multi-indexed DataFrames.
|
275 |
+
df_sec["B"] = 0
|
276 |
+
df_sec["C"] = 1
|
277 |
+
|
278 |
+
expected_rows = ["A,B,C", "2013-01-01,0,1.0"]
|
279 |
+
expected_ymd_sec = tm.convert_rows_list_to_csv_str(expected_rows)
|
280 |
+
|
281 |
+
df_sec_grouped = df_sec.groupby([pd.Grouper(key="A", freq="1h"), "B"])
|
282 |
+
assert df_sec_grouped.mean().to_csv(date_format="%Y-%m-%d") == expected_ymd_sec
|
283 |
+
|
284 |
+
def test_to_csv_different_datetime_formats(self):
|
285 |
+
# GH#21734
|
286 |
+
df = DataFrame(
|
287 |
+
{
|
288 |
+
"date": pd.to_datetime("1970-01-01"),
|
289 |
+
"datetime": pd.date_range("1970-01-01", periods=2, freq="h"),
|
290 |
+
}
|
291 |
+
)
|
292 |
+
expected_rows = [
|
293 |
+
"date,datetime",
|
294 |
+
"1970-01-01,1970-01-01 00:00:00",
|
295 |
+
"1970-01-01,1970-01-01 01:00:00",
|
296 |
+
]
|
297 |
+
expected = tm.convert_rows_list_to_csv_str(expected_rows)
|
298 |
+
assert df.to_csv(index=False) == expected
|
299 |
+
|
300 |
+
def test_to_csv_date_format_in_categorical(self):
|
301 |
+
# GH#40754
|
302 |
+
ser = pd.Series(pd.to_datetime(["2021-03-27", pd.NaT], format="%Y-%m-%d"))
|
303 |
+
ser = ser.astype("category")
|
304 |
+
expected = tm.convert_rows_list_to_csv_str(["0", "2021-03-27", '""'])
|
305 |
+
assert ser.to_csv(index=False) == expected
|
306 |
+
|
307 |
+
ser = pd.Series(
|
308 |
+
pd.date_range(
|
309 |
+
start="2021-03-27", freq="D", periods=1, tz="Europe/Berlin"
|
310 |
+
).append(pd.DatetimeIndex([pd.NaT]))
|
311 |
+
)
|
312 |
+
ser = ser.astype("category")
|
313 |
+
assert ser.to_csv(index=False, date_format="%Y-%m-%d") == expected
|
314 |
+
|
315 |
+
def test_to_csv_float_ea_float_format(self):
|
316 |
+
# GH#45991
|
317 |
+
df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"})
|
318 |
+
df["a"] = df["a"].astype("Float64")
|
319 |
+
result = df.to_csv(index=False, float_format="%.5f")
|
320 |
+
expected = tm.convert_rows_list_to_csv_str(
|
321 |
+
["a,b", "1.10000,c", "2.02000,c", ",c", "6.00001,c"]
|
322 |
+
)
|
323 |
+
assert result == expected
|
324 |
+
|
325 |
+
def test_to_csv_float_ea_no_float_format(self):
|
326 |
+
# GH#45991
|
327 |
+
df = DataFrame({"a": [1.1, 2.02, pd.NA, 6.000006], "b": "c"})
|
328 |
+
df["a"] = df["a"].astype("Float64")
|
329 |
+
result = df.to_csv(index=False)
|
330 |
+
expected = tm.convert_rows_list_to_csv_str(
|
331 |
+
["a,b", "1.1,c", "2.02,c", ",c", "6.000006,c"]
|
332 |
+
)
|
333 |
+
assert result == expected
|
334 |
+
|
335 |
+
def test_to_csv_multi_index(self):
|
336 |
+
# see gh-6618
|
337 |
+
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
|
338 |
+
|
339 |
+
exp_rows = [",1", ",2", "0,1"]
|
340 |
+
exp = tm.convert_rows_list_to_csv_str(exp_rows)
|
341 |
+
assert df.to_csv() == exp
|
342 |
+
|
343 |
+
exp_rows = ["1", "2", "1"]
|
344 |
+
exp = tm.convert_rows_list_to_csv_str(exp_rows)
|
345 |
+
assert df.to_csv(index=False) == exp
|
346 |
+
|
347 |
+
df = DataFrame(
|
348 |
+
[1],
|
349 |
+
columns=pd.MultiIndex.from_arrays([[1], [2]]),
|
350 |
+
index=pd.MultiIndex.from_arrays([[1], [2]]),
|
351 |
+
)
|
352 |
+
|
353 |
+
exp_rows = [",,1", ",,2", "1,2,1"]
|
354 |
+
exp = tm.convert_rows_list_to_csv_str(exp_rows)
|
355 |
+
assert df.to_csv() == exp
|
356 |
+
|
357 |
+
exp_rows = ["1", "2", "1"]
|
358 |
+
exp = tm.convert_rows_list_to_csv_str(exp_rows)
|
359 |
+
assert df.to_csv(index=False) == exp
|
360 |
+
|
361 |
+
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([["foo"], ["bar"]]))
|
362 |
+
|
363 |
+
exp_rows = [",foo", ",bar", "0,1"]
|
364 |
+
exp = tm.convert_rows_list_to_csv_str(exp_rows)
|
365 |
+
assert df.to_csv() == exp
|
366 |
+
|
367 |
+
exp_rows = ["foo", "bar", "1"]
|
368 |
+
exp = tm.convert_rows_list_to_csv_str(exp_rows)
|
369 |
+
assert df.to_csv(index=False) == exp
|
370 |
+
|
371 |
+
@pytest.mark.parametrize(
|
372 |
+
"ind,expected",
|
373 |
+
[
|
374 |
+
(
|
375 |
+
pd.MultiIndex(levels=[[1.0]], codes=[[0]], names=["x"]),
|
376 |
+
"x,data\n1.0,1\n",
|
377 |
+
),
|
378 |
+
(
|
379 |
+
pd.MultiIndex(
|
380 |
+
levels=[[1.0], [2.0]], codes=[[0], [0]], names=["x", "y"]
|
381 |
+
),
|
382 |
+
"x,y,data\n1.0,2.0,1\n",
|
383 |
+
),
|
384 |
+
],
|
385 |
+
)
|
386 |
+
def test_to_csv_single_level_multi_index(self, ind, expected, frame_or_series):
|
387 |
+
# see gh-19589
|
388 |
+
obj = frame_or_series(pd.Series([1], ind, name="data"))
|
389 |
+
|
390 |
+
result = obj.to_csv(lineterminator="\n", header=True)
|
391 |
+
assert result == expected
|
392 |
+
|
393 |
+
def test_to_csv_string_array_ascii(self):
|
394 |
+
# GH 10813
|
395 |
+
str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}]
|
396 |
+
df = DataFrame(str_array)
|
397 |
+
expected_ascii = """\
|
398 |
+
,names
|
399 |
+
0,"['foo', 'bar']"
|
400 |
+
1,"['baz', 'qux']"
|
401 |
+
"""
|
402 |
+
with tm.ensure_clean("str_test.csv") as path:
|
403 |
+
df.to_csv(path, encoding="ascii")
|
404 |
+
with open(path, encoding="utf-8") as f:
|
405 |
+
assert f.read() == expected_ascii
|
406 |
+
|
407 |
+
def test_to_csv_string_array_utf8(self):
|
408 |
+
# GH 10813
|
409 |
+
str_array = [{"names": ["foo", "bar"]}, {"names": ["baz", "qux"]}]
|
410 |
+
df = DataFrame(str_array)
|
411 |
+
expected_utf8 = """\
|
412 |
+
,names
|
413 |
+
0,"['foo', 'bar']"
|
414 |
+
1,"['baz', 'qux']"
|
415 |
+
"""
|
416 |
+
with tm.ensure_clean("unicode_test.csv") as path:
|
417 |
+
df.to_csv(path, encoding="utf-8")
|
418 |
+
with open(path, encoding="utf-8") as f:
|
419 |
+
assert f.read() == expected_utf8
|
420 |
+
|
421 |
+
def test_to_csv_string_with_lf(self):
|
422 |
+
# GH 20353
|
423 |
+
data = {"int": [1, 2, 3], "str_lf": ["abc", "d\nef", "g\nh\n\ni"]}
|
424 |
+
df = DataFrame(data)
|
425 |
+
with tm.ensure_clean("lf_test.csv") as path:
|
426 |
+
# case 1: The default line terminator(=os.linesep)(PR 21406)
|
427 |
+
os_linesep = os.linesep.encode("utf-8")
|
428 |
+
expected_noarg = (
|
429 |
+
b"int,str_lf"
|
430 |
+
+ os_linesep
|
431 |
+
+ b"1,abc"
|
432 |
+
+ os_linesep
|
433 |
+
+ b'2,"d\nef"'
|
434 |
+
+ os_linesep
|
435 |
+
+ b'3,"g\nh\n\ni"'
|
436 |
+
+ os_linesep
|
437 |
+
)
|
438 |
+
df.to_csv(path, index=False)
|
439 |
+
with open(path, "rb") as f:
|
440 |
+
assert f.read() == expected_noarg
|
441 |
+
with tm.ensure_clean("lf_test.csv") as path:
|
442 |
+
# case 2: LF as line terminator
|
443 |
+
expected_lf = b'int,str_lf\n1,abc\n2,"d\nef"\n3,"g\nh\n\ni"\n'
|
444 |
+
df.to_csv(path, lineterminator="\n", index=False)
|
445 |
+
with open(path, "rb") as f:
|
446 |
+
assert f.read() == expected_lf
|
447 |
+
with tm.ensure_clean("lf_test.csv") as path:
|
448 |
+
# case 3: CRLF as line terminator
|
449 |
+
# 'lineterminator' should not change inner element
|
450 |
+
expected_crlf = b'int,str_lf\r\n1,abc\r\n2,"d\nef"\r\n3,"g\nh\n\ni"\r\n'
|
451 |
+
df.to_csv(path, lineterminator="\r\n", index=False)
|
452 |
+
with open(path, "rb") as f:
|
453 |
+
assert f.read() == expected_crlf
|
454 |
+
|
455 |
+
def test_to_csv_string_with_crlf(self):
|
456 |
+
# GH 20353
|
457 |
+
data = {"int": [1, 2, 3], "str_crlf": ["abc", "d\r\nef", "g\r\nh\r\n\r\ni"]}
|
458 |
+
df = DataFrame(data)
|
459 |
+
with tm.ensure_clean("crlf_test.csv") as path:
|
460 |
+
# case 1: The default line terminator(=os.linesep)(PR 21406)
|
461 |
+
os_linesep = os.linesep.encode("utf-8")
|
462 |
+
expected_noarg = (
|
463 |
+
b"int,str_crlf"
|
464 |
+
+ os_linesep
|
465 |
+
+ b"1,abc"
|
466 |
+
+ os_linesep
|
467 |
+
+ b'2,"d\r\nef"'
|
468 |
+
+ os_linesep
|
469 |
+
+ b'3,"g\r\nh\r\n\r\ni"'
|
470 |
+
+ os_linesep
|
471 |
+
)
|
472 |
+
df.to_csv(path, index=False)
|
473 |
+
with open(path, "rb") as f:
|
474 |
+
assert f.read() == expected_noarg
|
475 |
+
with tm.ensure_clean("crlf_test.csv") as path:
|
476 |
+
# case 2: LF as line terminator
|
477 |
+
expected_lf = b'int,str_crlf\n1,abc\n2,"d\r\nef"\n3,"g\r\nh\r\n\r\ni"\n'
|
478 |
+
df.to_csv(path, lineterminator="\n", index=False)
|
479 |
+
with open(path, "rb") as f:
|
480 |
+
assert f.read() == expected_lf
|
481 |
+
with tm.ensure_clean("crlf_test.csv") as path:
|
482 |
+
# case 3: CRLF as line terminator
|
483 |
+
# 'lineterminator' should not change inner element
|
484 |
+
expected_crlf = (
|
485 |
+
b"int,str_crlf\r\n"
|
486 |
+
b"1,abc\r\n"
|
487 |
+
b'2,"d\r\nef"\r\n'
|
488 |
+
b'3,"g\r\nh\r\n\r\ni"\r\n'
|
489 |
+
)
|
490 |
+
df.to_csv(path, lineterminator="\r\n", index=False)
|
491 |
+
with open(path, "rb") as f:
|
492 |
+
assert f.read() == expected_crlf
|
493 |
+
|
494 |
+
def test_to_csv_stdout_file(self, capsys):
|
495 |
+
# GH 21561
|
496 |
+
df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["name_1", "name_2"])
|
497 |
+
expected_rows = [",name_1,name_2", "0,foo,bar", "1,baz,qux"]
|
498 |
+
expected_ascii = tm.convert_rows_list_to_csv_str(expected_rows)
|
499 |
+
|
500 |
+
df.to_csv(sys.stdout, encoding="ascii")
|
501 |
+
captured = capsys.readouterr()
|
502 |
+
|
503 |
+
assert captured.out == expected_ascii
|
504 |
+
assert not sys.stdout.closed
|
505 |
+
|
506 |
+
@pytest.mark.xfail(
|
507 |
+
compat.is_platform_windows(),
|
508 |
+
reason=(
|
509 |
+
"Especially in Windows, file stream should not be passed"
|
510 |
+
"to csv writer without newline='' option."
|
511 |
+
"(https://docs.python.org/3/library/csv.html#csv.writer)"
|
512 |
+
),
|
513 |
+
)
|
514 |
+
def test_to_csv_write_to_open_file(self):
|
515 |
+
# GH 21696
|
516 |
+
df = DataFrame({"a": ["x", "y", "z"]})
|
517 |
+
expected = """\
|
518 |
+
manual header
|
519 |
+
x
|
520 |
+
y
|
521 |
+
z
|
522 |
+
"""
|
523 |
+
with tm.ensure_clean("test.txt") as path:
|
524 |
+
with open(path, "w", encoding="utf-8") as f:
|
525 |
+
f.write("manual header\n")
|
526 |
+
df.to_csv(f, header=None, index=None)
|
527 |
+
with open(path, encoding="utf-8") as f:
|
528 |
+
assert f.read() == expected
|
529 |
+
|
530 |
+
def test_to_csv_write_to_open_file_with_newline_py3(self):
|
531 |
+
# see gh-21696
|
532 |
+
# see gh-20353
|
533 |
+
df = DataFrame({"a": ["x", "y", "z"]})
|
534 |
+
expected_rows = ["x", "y", "z"]
|
535 |
+
expected = "manual header\n" + tm.convert_rows_list_to_csv_str(expected_rows)
|
536 |
+
with tm.ensure_clean("test.txt") as path:
|
537 |
+
with open(path, "w", newline="", encoding="utf-8") as f:
|
538 |
+
f.write("manual header\n")
|
539 |
+
df.to_csv(f, header=None, index=None)
|
540 |
+
|
541 |
+
with open(path, "rb") as f:
|
542 |
+
assert f.read() == bytes(expected, "utf-8")
|
543 |
+
|
544 |
+
@pytest.mark.parametrize("to_infer", [True, False])
|
545 |
+
@pytest.mark.parametrize("read_infer", [True, False])
|
546 |
+
def test_to_csv_compression(
|
547 |
+
self, compression_only, read_infer, to_infer, compression_to_extension
|
548 |
+
):
|
549 |
+
# see gh-15008
|
550 |
+
compression = compression_only
|
551 |
+
|
552 |
+
# We'll complete file extension subsequently.
|
553 |
+
filename = "test."
|
554 |
+
filename += compression_to_extension[compression]
|
555 |
+
|
556 |
+
df = DataFrame({"A": [1]})
|
557 |
+
|
558 |
+
to_compression = "infer" if to_infer else compression
|
559 |
+
read_compression = "infer" if read_infer else compression
|
560 |
+
|
561 |
+
with tm.ensure_clean(filename) as path:
|
562 |
+
df.to_csv(path, compression=to_compression)
|
563 |
+
result = pd.read_csv(path, index_col=0, compression=read_compression)
|
564 |
+
tm.assert_frame_equal(result, df)
|
565 |
+
|
566 |
+
def test_to_csv_compression_dict(self, compression_only):
|
567 |
+
# GH 26023
|
568 |
+
method = compression_only
|
569 |
+
df = DataFrame({"ABC": [1]})
|
570 |
+
filename = "to_csv_compress_as_dict."
|
571 |
+
extension = {
|
572 |
+
"gzip": "gz",
|
573 |
+
"zstd": "zst",
|
574 |
+
}.get(method, method)
|
575 |
+
filename += extension
|
576 |
+
with tm.ensure_clean(filename) as path:
|
577 |
+
df.to_csv(path, compression={"method": method})
|
578 |
+
read_df = pd.read_csv(path, index_col=0)
|
579 |
+
tm.assert_frame_equal(read_df, df)
|
580 |
+
|
581 |
+
def test_to_csv_compression_dict_no_method_raises(self):
|
582 |
+
# GH 26023
|
583 |
+
df = DataFrame({"ABC": [1]})
|
584 |
+
compression = {"some_option": True}
|
585 |
+
msg = "must have key 'method'"
|
586 |
+
|
587 |
+
with tm.ensure_clean("out.zip") as path:
|
588 |
+
with pytest.raises(ValueError, match=msg):
|
589 |
+
df.to_csv(path, compression=compression)
|
590 |
+
|
591 |
+
@pytest.mark.parametrize("compression", ["zip", "infer"])
|
592 |
+
@pytest.mark.parametrize("archive_name", ["test_to_csv.csv", "test_to_csv.zip"])
|
593 |
+
def test_to_csv_zip_arguments(self, compression, archive_name):
|
594 |
+
# GH 26023
|
595 |
+
df = DataFrame({"ABC": [1]})
|
596 |
+
with tm.ensure_clean("to_csv_archive_name.zip") as path:
|
597 |
+
df.to_csv(
|
598 |
+
path, compression={"method": compression, "archive_name": archive_name}
|
599 |
+
)
|
600 |
+
with ZipFile(path) as zp:
|
601 |
+
assert len(zp.filelist) == 1
|
602 |
+
archived_file = zp.filelist[0].filename
|
603 |
+
assert archived_file == archive_name
|
604 |
+
|
605 |
+
@pytest.mark.parametrize(
|
606 |
+
"filename,expected_arcname",
|
607 |
+
[
|
608 |
+
("archive.csv", "archive.csv"),
|
609 |
+
("archive.tsv", "archive.tsv"),
|
610 |
+
("archive.csv.zip", "archive.csv"),
|
611 |
+
("archive.tsv.zip", "archive.tsv"),
|
612 |
+
("archive.zip", "archive"),
|
613 |
+
],
|
614 |
+
)
|
615 |
+
def test_to_csv_zip_infer_name(self, tmp_path, filename, expected_arcname):
|
616 |
+
# GH 39465
|
617 |
+
df = DataFrame({"ABC": [1]})
|
618 |
+
        path = tmp_path / filename
        df.to_csv(path, compression="zip")
        with ZipFile(path) as zp:
            assert len(zp.filelist) == 1
            archived_file = zp.filelist[0].filename
            assert archived_file == expected_arcname

    @pytest.mark.parametrize("df_new_type", ["Int64"])
    def test_to_csv_na_rep_long_string(self, df_new_type):
        # see gh-25099
        df = DataFrame({"c": [float("nan")] * 3})
        df = df.astype(df_new_type)
        expected_rows = ["c", "mynull", "mynull", "mynull"]
        expected = tm.convert_rows_list_to_csv_str(expected_rows)

        result = df.to_csv(index=False, na_rep="mynull", encoding="ascii")

        assert expected == result

    def test_to_csv_timedelta_precision(self):
        # GH 6783
        s = pd.Series([1, 1]).astype("timedelta64[ns]")
        buf = io.StringIO()
        s.to_csv(buf)
        result = buf.getvalue()
        expected_rows = [
            ",0",
            "0,0 days 00:00:00.000000001",
            "1,0 days 00:00:00.000000001",
        ]
        expected = tm.convert_rows_list_to_csv_str(expected_rows)
        assert result == expected

    def test_na_rep_truncated(self):
        # https://github.com/pandas-dev/pandas/issues/31447
        result = pd.Series(range(8, 12)).to_csv(na_rep="-")
        expected = tm.convert_rows_list_to_csv_str([",0", "0,8", "1,9", "2,10", "3,11"])
        assert result == expected

        result = pd.Series([True, False]).to_csv(na_rep="nan")
        expected = tm.convert_rows_list_to_csv_str([",0", "0,True", "1,False"])
        assert result == expected

        result = pd.Series([1.1, 2.2]).to_csv(na_rep=".")
        expected = tm.convert_rows_list_to_csv_str([",0", "0,1.1", "1,2.2"])
        assert result == expected

    @pytest.mark.parametrize("errors", ["surrogatepass", "ignore", "replace"])
    def test_to_csv_errors(self, errors):
        # GH 22610
        data = ["\ud800foo"]
        ser = pd.Series(data, index=Index(data, dtype=object), dtype=object)
        with tm.ensure_clean("test.csv") as path:
            ser.to_csv(path, errors=errors)
        # No use in reading back the data as it is not the same anymore
        # due to the error handling

    @pytest.mark.parametrize("mode", ["wb", "w"])
    def test_to_csv_binary_handle(self, mode):
        """
        Binary file objects should work (if 'mode' contains a 'b') or even without
        it in most cases.

        GH 35058 and GH 19827
        """
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=Index(list("ABCD")),
            index=Index([f"i-{i}" for i in range(30)]),
        )
        with tm.ensure_clean() as path:
            with open(path, mode="w+b") as handle:
                df.to_csv(handle, mode=mode)
            tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))

    @pytest.mark.parametrize("mode", ["wb", "w"])
    def test_to_csv_encoding_binary_handle(self, mode):
        """
        Binary file objects should honor a specified encoding.

        GH 23854 and GH 13068 with binary handles
        """
        # example from GH 23854
        content = "a, b, 🐟".encode("utf-8-sig")
        buffer = io.BytesIO(content)
        df = pd.read_csv(buffer, encoding="utf-8-sig")

        buffer = io.BytesIO()
        df.to_csv(buffer, mode=mode, encoding="utf-8-sig", index=False)
        buffer.seek(0)  # tests whether file handle wasn't closed
        assert buffer.getvalue().startswith(content)

        # example from GH 13068
        with tm.ensure_clean() as path:
            with open(path, "w+b") as handle:
                DataFrame().to_csv(handle, mode=mode, encoding="utf-8-sig")

                handle.seek(0)
                assert handle.read().startswith(b'\xef\xbb\xbf""')


def test_to_csv_iterative_compression_name(compression):
    # GH 38714
    df = DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=Index(list("ABCD")),
        index=Index([f"i-{i}" for i in range(30)]),
    )
    with tm.ensure_clean() as path:
        df.to_csv(path, compression=compression, chunksize=1)
        tm.assert_frame_equal(
            pd.read_csv(path, compression=compression, index_col=0), df
        )


def test_to_csv_iterative_compression_buffer(compression):
    # GH 38714
    df = DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=Index(list("ABCD")),
        index=Index([f"i-{i}" for i in range(30)]),
    )
    with io.BytesIO() as buffer:
        df.to_csv(buffer, compression=compression, chunksize=1)
        buffer.seek(0)
        tm.assert_frame_equal(
            pd.read_csv(buffer, compression=compression, index_col=0), df
        )
        assert not buffer.closed


def test_to_csv_pos_args_deprecation():
    # GH-54229
    df = DataFrame({"a": [1, 2, 3]})
    msg = (
        r"Starting with pandas version 3.0 all arguments of to_csv except for the "
        r"argument 'path_or_buf' will be keyword-only."
    )
    with tm.assert_produces_warning(FutureWarning, match=msg):
        buffer = io.BytesIO()
        df.to_csv(buffer, ";")
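
For context, the zip-member naming asserted at the top of this hunk can be reproduced standalone: to_csv infers ZIP compression from the extension and (per GH 39465) names the archived member after the output file. A minimal sketch; the output path here is illustrative, not from the test suite:

import zipfile

import pandas as pd

df = pd.DataFrame({"ABC": [1]})
df.to_csv("archived.csv.zip", compression="zip")  # .zip extension selects ZIP
with zipfile.ZipFile("archived.csv.zip") as zp:
    # a single member, named after the file minus the .zip suffix
    print(zp.namelist())  # expected: ['archived.csv']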
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/formats/test_to_excel.py
ADDED
@@ -0,0 +1,429 @@
"""Tests formatting as writer-agnostic ExcelCells

ExcelFormatter is tested implicitly in pandas/tests/io/excel
"""
import string

import pytest

from pandas.errors import CSSWarning

import pandas._testing as tm

from pandas.io.formats.excel import (
    CssExcelCell,
    CSSToExcelConverter,
)


@pytest.mark.parametrize(
    "css,expected",
    [
        # FONT
        # - name
        ("font-family: foo,bar", {"font": {"name": "foo"}}),
        ('font-family: "foo bar",baz', {"font": {"name": "foo bar"}}),
        ("font-family: foo,\nbar", {"font": {"name": "foo"}}),
        ("font-family: foo, bar, baz", {"font": {"name": "foo"}}),
        ("font-family: bar, foo", {"font": {"name": "bar"}}),
        ("font-family: 'foo bar', baz", {"font": {"name": "foo bar"}}),
        ("font-family: 'foo \\'bar', baz", {"font": {"name": "foo 'bar"}}),
        ('font-family: "foo \\"bar", baz', {"font": {"name": 'foo "bar'}}),
        ('font-family: "foo ,bar", baz', {"font": {"name": "foo ,bar"}}),
        # - family
        ("font-family: serif", {"font": {"name": "serif", "family": 1}}),
        ("font-family: Serif", {"font": {"name": "serif", "family": 1}}),
        ("font-family: roman, serif", {"font": {"name": "roman", "family": 1}}),
        ("font-family: roman, sans-serif", {"font": {"name": "roman", "family": 2}}),
        ("font-family: roman, sans serif", {"font": {"name": "roman"}}),
        ("font-family: roman, sansserif", {"font": {"name": "roman"}}),
        ("font-family: roman, cursive", {"font": {"name": "roman", "family": 4}}),
        ("font-family: roman, fantasy", {"font": {"name": "roman", "family": 5}}),
        # - size
        ("font-size: 1em", {"font": {"size": 12}}),
        ("font-size: xx-small", {"font": {"size": 6}}),
        ("font-size: x-small", {"font": {"size": 7.5}}),
        ("font-size: small", {"font": {"size": 9.6}}),
        ("font-size: medium", {"font": {"size": 12}}),
        ("font-size: large", {"font": {"size": 13.5}}),
        ("font-size: x-large", {"font": {"size": 18}}),
        ("font-size: xx-large", {"font": {"size": 24}}),
        ("font-size: 50%", {"font": {"size": 6}}),
        # - bold
        ("font-weight: 100", {"font": {"bold": False}}),
        ("font-weight: 200", {"font": {"bold": False}}),
        ("font-weight: 300", {"font": {"bold": False}}),
        ("font-weight: 400", {"font": {"bold": False}}),
        ("font-weight: normal", {"font": {"bold": False}}),
        ("font-weight: lighter", {"font": {"bold": False}}),
        ("font-weight: bold", {"font": {"bold": True}}),
        ("font-weight: bolder", {"font": {"bold": True}}),
        ("font-weight: 700", {"font": {"bold": True}}),
        ("font-weight: 800", {"font": {"bold": True}}),
        ("font-weight: 900", {"font": {"bold": True}}),
        # - italic
        ("font-style: italic", {"font": {"italic": True}}),
        ("font-style: oblique", {"font": {"italic": True}}),
        # - underline
        ("text-decoration: underline", {"font": {"underline": "single"}}),
        ("text-decoration: overline", {}),
        ("text-decoration: none", {}),
        # - strike
        ("text-decoration: line-through", {"font": {"strike": True}}),
        (
            "text-decoration: underline line-through",
            {"font": {"strike": True, "underline": "single"}},
        ),
        (
            "text-decoration: underline; text-decoration: line-through",
            {"font": {"strike": True}},
        ),
        # - color
        ("color: red", {"font": {"color": "FF0000"}}),
        ("color: #ff0000", {"font": {"color": "FF0000"}}),
        ("color: #f0a", {"font": {"color": "FF00AA"}}),
        # - shadow
        ("text-shadow: none", {"font": {"shadow": False}}),
        ("text-shadow: 0px -0em 0px #CCC", {"font": {"shadow": False}}),
        ("text-shadow: 0px -0em 0px #999", {"font": {"shadow": False}}),
        ("text-shadow: 0px -0em 0px", {"font": {"shadow": False}}),
        ("text-shadow: 2px -0em 0px #CCC", {"font": {"shadow": True}}),
        ("text-shadow: 0px -2em 0px #CCC", {"font": {"shadow": True}}),
        ("text-shadow: 0px -0em 2px #CCC", {"font": {"shadow": True}}),
        ("text-shadow: 0px -0em 2px", {"font": {"shadow": True}}),
        ("text-shadow: 0px -2em", {"font": {"shadow": True}}),
        # FILL
        # - color, fillType
        (
            "background-color: red",
            {"fill": {"fgColor": "FF0000", "patternType": "solid"}},
        ),
        (
            "background-color: #ff0000",
            {"fill": {"fgColor": "FF0000", "patternType": "solid"}},
        ),
        (
            "background-color: #f0a",
            {"fill": {"fgColor": "FF00AA", "patternType": "solid"}},
        ),
        # BORDER
        # - style
        (
            "border-style: solid",
            {
                "border": {
                    "top": {"style": "medium"},
                    "bottom": {"style": "medium"},
                    "left": {"style": "medium"},
                    "right": {"style": "medium"},
                }
            },
        ),
        (
            "border-style: solid; border-width: thin",
            {
                "border": {
                    "top": {"style": "thin"},
                    "bottom": {"style": "thin"},
                    "left": {"style": "thin"},
                    "right": {"style": "thin"},
                }
            },
        ),
        (
            "border-top-style: solid; border-top-width: thin",
            {"border": {"top": {"style": "thin"}}},
        ),
        (
            "border-top-style: solid; border-top-width: 1pt",
            {"border": {"top": {"style": "thin"}}},
        ),
        ("border-top-style: solid", {"border": {"top": {"style": "medium"}}}),
        (
            "border-top-style: solid; border-top-width: medium",
            {"border": {"top": {"style": "medium"}}},
        ),
        (
            "border-top-style: solid; border-top-width: 2pt",
            {"border": {"top": {"style": "medium"}}},
        ),
        (
            "border-top-style: solid; border-top-width: thick",
            {"border": {"top": {"style": "thick"}}},
        ),
        (
            "border-top-style: solid; border-top-width: 4pt",
            {"border": {"top": {"style": "thick"}}},
        ),
        (
            "border-top-style: dotted",
            {"border": {"top": {"style": "mediumDashDotDot"}}},
        ),
        (
            "border-top-style: dotted; border-top-width: thin",
            {"border": {"top": {"style": "dotted"}}},
        ),
        ("border-top-style: dashed", {"border": {"top": {"style": "mediumDashed"}}}),
        (
            "border-top-style: dashed; border-top-width: thin",
            {"border": {"top": {"style": "dashed"}}},
        ),
        ("border-top-style: double", {"border": {"top": {"style": "double"}}}),
        # - color
        (
            "border-style: solid; border-color: #0000ff",
            {
                "border": {
                    "top": {"style": "medium", "color": "0000FF"},
                    "right": {"style": "medium", "color": "0000FF"},
                    "bottom": {"style": "medium", "color": "0000FF"},
                    "left": {"style": "medium", "color": "0000FF"},
                }
            },
        ),
        (
            "border-top-style: double; border-top-color: blue",
            {"border": {"top": {"style": "double", "color": "0000FF"}}},
        ),
        (
            "border-top-style: solid; border-top-color: #06c",
            {"border": {"top": {"style": "medium", "color": "0066CC"}}},
        ),
        (
            "border-top-color: blue",
            {"border": {"top": {"color": "0000FF", "style": "none"}}},
        ),
        # ALIGNMENT
        # - horizontal
        ("text-align: center", {"alignment": {"horizontal": "center"}}),
        ("text-align: left", {"alignment": {"horizontal": "left"}}),
        ("text-align: right", {"alignment": {"horizontal": "right"}}),
        ("text-align: justify", {"alignment": {"horizontal": "justify"}}),
        # - vertical
        ("vertical-align: top", {"alignment": {"vertical": "top"}}),
        ("vertical-align: text-top", {"alignment": {"vertical": "top"}}),
        ("vertical-align: middle", {"alignment": {"vertical": "center"}}),
        ("vertical-align: bottom", {"alignment": {"vertical": "bottom"}}),
        ("vertical-align: text-bottom", {"alignment": {"vertical": "bottom"}}),
        # - wrap_text
        ("white-space: nowrap", {"alignment": {"wrap_text": False}}),
        ("white-space: pre", {"alignment": {"wrap_text": False}}),
        ("white-space: pre-line", {"alignment": {"wrap_text": False}}),
        ("white-space: normal", {"alignment": {"wrap_text": True}}),
        # NUMBER FORMAT
        ("number-format: 0%", {"number_format": {"format_code": "0%"}}),
        (
            "number-format: 0§[Red](0)§-§@;",
            {"number_format": {"format_code": "0;[red](0);-;@"}},  # GH 46152
        ),
    ],
)
def test_css_to_excel(css, expected):
    convert = CSSToExcelConverter()
    assert expected == convert(css)


def test_css_to_excel_multiple():
    convert = CSSToExcelConverter()
    actual = convert(
        """
        font-weight: bold;
        text-decoration: underline;
        color: red;
        border-width: thin;
        text-align: center;
        vertical-align: top;
        unused: something;
        """
    )
    assert {
        "font": {"bold": True, "underline": "single", "color": "FF0000"},
        "border": {
            "top": {"style": "thin"},
            "right": {"style": "thin"},
            "bottom": {"style": "thin"},
            "left": {"style": "thin"},
        },
        "alignment": {"horizontal": "center", "vertical": "top"},
    } == actual


@pytest.mark.parametrize(
    "css,inherited,expected",
    [
        ("font-weight: bold", "", {"font": {"bold": True}}),
        ("", "font-weight: bold", {"font": {"bold": True}}),
        (
            "font-weight: bold",
            "font-style: italic",
            {"font": {"bold": True, "italic": True}},
        ),
        ("font-style: normal", "font-style: italic", {"font": {"italic": False}}),
        ("font-style: inherit", "", {}),
        (
            "font-style: normal; font-style: inherit",
            "font-style: italic",
            {"font": {"italic": True}},
        ),
    ],
)
def test_css_to_excel_inherited(css, inherited, expected):
    convert = CSSToExcelConverter(inherited)
    assert expected == convert(css)


@pytest.mark.parametrize(
    "input_color,output_color",
    (
        list(CSSToExcelConverter.NAMED_COLORS.items())
        + [("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()]
        + [("#F0F", "FF00FF"), ("#ABC", "AABBCC")]
    ),
)
def test_css_to_excel_good_colors(input_color, output_color):
    # see gh-18392
    css = (
        f"border-top-color: {input_color}; "
        f"border-right-color: {input_color}; "
        f"border-bottom-color: {input_color}; "
        f"border-left-color: {input_color}; "
        f"background-color: {input_color}; "
        f"color: {input_color}"
    )

    expected = {}

    expected["fill"] = {"patternType": "solid", "fgColor": output_color}

    expected["font"] = {"color": output_color}

    expected["border"] = {
        k: {"color": output_color, "style": "none"}
        for k in ("top", "right", "bottom", "left")
    }

    with tm.assert_produces_warning(None):
        convert = CSSToExcelConverter()
        assert expected == convert(css)


@pytest.mark.parametrize("input_color", [None, "not-a-color"])
def test_css_to_excel_bad_colors(input_color):
    # see gh-18392
    css = (
        f"border-top-color: {input_color}; "
        f"border-right-color: {input_color}; "
        f"border-bottom-color: {input_color}; "
        f"border-left-color: {input_color}; "
        f"background-color: {input_color}; "
        f"color: {input_color}"
    )

    expected = {}

    if input_color is not None:
        expected["fill"] = {"patternType": "solid"}

    with tm.assert_produces_warning(CSSWarning):
        convert = CSSToExcelConverter()
        assert expected == convert(css)


def tests_css_named_colors_valid():
    upper_hexs = set(map(str.upper, string.hexdigits))
    for color in CSSToExcelConverter.NAMED_COLORS.values():
        assert len(color) == 6 and all(c in upper_hexs for c in color)


def test_css_named_colors_from_mpl_present():
    mpl_colors = pytest.importorskip("matplotlib.colors")

    pd_colors = CSSToExcelConverter.NAMED_COLORS
    for name, color in mpl_colors.CSS4_COLORS.items():
        assert name in pd_colors and pd_colors[name] == color[1:]


@pytest.mark.parametrize(
    "styles,expected",
    [
        ([("color", "green"), ("color", "red")], "color: red;"),
        ([("font-weight", "bold"), ("font-weight", "normal")], "font-weight: normal;"),
        ([("text-align", "center"), ("TEXT-ALIGN", "right")], "text-align: right;"),
    ],
)
def test_css_excel_cell_precedence(styles, expected):
    """It favors latter declarations over former declarations"""
    # See GH 47371
    converter = CSSToExcelConverter()
    converter._call_cached.cache_clear()
    css_styles = {(0, 0): styles}
    cell = CssExcelCell(
        row=0,
        col=0,
        val="",
        style=None,
        css_styles=css_styles,
        css_row=0,
        css_col=0,
        css_converter=converter,
    )
    converter._call_cached.cache_clear()

    assert cell.style == converter(expected)


@pytest.mark.parametrize(
    "styles,cache_hits,cache_misses",
    [
        ([[("color", "green"), ("color", "red"), ("color", "green")]], 0, 1),
        (
            [
                [("font-weight", "bold")],
                [("font-weight", "normal"), ("font-weight", "bold")],
            ],
            1,
            1,
        ),
        ([[("text-align", "center")], [("TEXT-ALIGN", "center")]], 1, 1),
        (
            [
                [("font-weight", "bold"), ("text-align", "center")],
                [("font-weight", "bold"), ("text-align", "left")],
            ],
            0,
            2,
        ),
        (
            [
                [("font-weight", "bold"), ("text-align", "center")],
                [("font-weight", "bold"), ("text-align", "left")],
                [("font-weight", "bold"), ("text-align", "center")],
            ],
            1,
            2,
        ),
    ],
)
def test_css_excel_cell_cache(styles, cache_hits, cache_misses):
    """It caches unique cell styles"""
    # See GH 47371
    converter = CSSToExcelConverter()
    converter._call_cached.cache_clear()

    css_styles = {(0, i): _style for i, _style in enumerate(styles)}
    for css_row, css_col in css_styles:
        CssExcelCell(
            row=0,
            col=0,
            val="",
            style=None,
            css_styles=css_styles,
            css_row=css_row,
            css_col=css_col,
            css_converter=converter,
        )
    cache_info = converter._call_cached.cache_info()
    converter._call_cached.cache_clear()

    assert cache_info.hits == cache_hits
    assert cache_info.misses == cache_misses
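
The converter exercised throughout this file is directly callable, which is handy for checking a single declaration by hand. A sketch against the same internal API the tests use (CSSToExcelConverter lives in a private pandas module, so this is not a supported public entry point):

from pandas.io.formats.excel import CSSToExcelConverter

convert = CSSToExcelConverter()
# Mirrors the parametrized cases above: shorthand hex expands, bold maps to a flag.
print(convert("font-weight: bold; color: #f0a"))
# {'font': {'bold': True, 'color': 'FF00AA'}}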
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py
ADDED
@@ -0,0 +1,342 @@
"""
self-contained to write legacy storage pickle files

To use this script, create an environment where you want to
generate pickles, say it's for 0.20.3, with your pandas clone
in ~/pandas

. activate pandas_0.20.3
cd ~/pandas/pandas

$ python -m tests.io.generate_legacy_storage_files \
    tests/io/data/legacy_pickle/0.20.3/ pickle

This script generates a storage file for the current arch, system,
and python version
  pandas version: 0.20.3
  output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/
  storage format: pickle
created pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle

The idea here is you are using the *current* version of the
generate_legacy_storage_files with an *older* version of pandas to
generate a pickle file. We will then check this file into a current
branch, and test using test_pickle.py. This will load the *older*
pickles and test versus the current data that is generated
(with main). These are then compared.

If we have cases where we changed the signature (e.g. we renamed
offset -> freq in Timestamp), then we have to conditionally execute
in generate_legacy_storage_files.py to make it
run under the older AND the newer version.

"""

from datetime import timedelta
import os
import pickle
import platform as pl
import sys

# Remove script directory from path, otherwise Python will try to
# import the JSON test directory as the json module
sys.path.pop(0)

import numpy as np

import pandas
from pandas import (
    Categorical,
    DataFrame,
    Index,
    MultiIndex,
    NaT,
    Period,
    RangeIndex,
    Series,
    Timestamp,
    bdate_range,
    date_range,
    interval_range,
    period_range,
    timedelta_range,
)
from pandas.arrays import SparseArray

from pandas.tseries.offsets import (
    FY5253,
    BusinessDay,
    BusinessHour,
    CustomBusinessDay,
    DateOffset,
    Day,
    Easter,
    Hour,
    LastWeekOfMonth,
    Minute,
    MonthBegin,
    MonthEnd,
    QuarterBegin,
    QuarterEnd,
    SemiMonthBegin,
    SemiMonthEnd,
    Week,
    WeekOfMonth,
    YearBegin,
    YearEnd,
)


def _create_sp_series():
    nan = np.nan

    # nan-based
    arr = np.arange(15, dtype=np.float64)
    arr[7:12] = nan
    arr[-1:] = nan

    bseries = Series(SparseArray(arr, kind="block"))
    bseries.name = "bseries"
    return bseries


def _create_sp_tsseries():
    nan = np.nan

    # nan-based
    arr = np.arange(15, dtype=np.float64)
    arr[7:12] = nan
    arr[-1:] = nan

    date_index = bdate_range("1/1/2011", periods=len(arr))
    bseries = Series(SparseArray(arr, kind="block"), index=date_index)
    bseries.name = "btsseries"
    return bseries


def _create_sp_frame():
    nan = np.nan

    data = {
        "A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
        "B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
        "C": np.arange(10).astype(np.int64),
        "D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
    }

    dates = bdate_range("1/1/2011", periods=10)
    return DataFrame(data, index=dates).apply(SparseArray)


def create_pickle_data():
    """create the pickle data"""
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, np.nan],
        "B": [0, 1, 0, 1, 0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": date_range("1/1/2009", periods=5),
        "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
    }

    scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")}

    index = {
        "int": Index(np.arange(10)),
        "date": date_range("20130101", periods=10),
        "period": period_range("2013-01-01", freq="M", periods=10),
        "float": Index(np.arange(10, dtype=np.float64)),
        "uint": Index(np.arange(10, dtype=np.uint64)),
        "timedelta": timedelta_range("00:00:00", freq="30min", periods=10),
    }

    index["range"] = RangeIndex(10)

    index["interval"] = interval_range(0, periods=10)

    mi = {
        "reg2": MultiIndex.from_tuples(
            tuple(
                zip(
                    *[
                        ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
                        ["one", "two", "one", "two", "one", "two", "one", "two"],
                    ]
                )
            ),
            names=["first", "second"],
        )
    }

    series = {
        "float": Series(data["A"]),
        "int": Series(data["B"]),
        "mixed": Series(data["E"]),
        "ts": Series(
            np.arange(10).astype(np.int64), index=date_range("20130101", periods=10)
        ),
        "mi": Series(
            np.arange(5).astype(np.float64),
            index=MultiIndex.from_tuples(
                tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"]
            ),
        ),
        "dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
        "cat": Series(Categorical(["foo", "bar", "baz"])),
        "dt": Series(date_range("20130101", periods=5)),
        "dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")),
        "period": Series([Period("2000Q1")] * 5),
    }

    mixed_dup_df = DataFrame(data)
    mixed_dup_df.columns = list("ABCDA")
    frame = {
        "float": DataFrame({"A": series["float"], "B": series["float"] + 1}),
        "int": DataFrame({"A": series["int"], "B": series["int"] + 1}),
        "mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
        "mi": DataFrame(
            {"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)},
            index=MultiIndex.from_tuples(
                tuple(
                    zip(
                        *[
                            ["bar", "bar", "baz", "baz", "baz"],
                            ["one", "two", "one", "two", "three"],
                        ]
                    )
                ),
                names=["first", "second"],
            ),
        ),
        "dup": DataFrame(
            np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"]
        ),
        "cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}),
        "cat_and_float": DataFrame(
            {
                "A": Categorical(["foo", "bar", "baz"]),
                "B": np.arange(3).astype(np.int64),
            }
        ),
        "mixed_dup": mixed_dup_df,
        "dt_mixed_tzs": DataFrame(
            {
                "A": Timestamp("20130102", tz="US/Eastern"),
                "B": Timestamp("20130603", tz="CET"),
            },
            index=range(5),
        ),
        "dt_mixed2_tzs": DataFrame(
            {
                "A": Timestamp("20130102", tz="US/Eastern"),
                "B": Timestamp("20130603", tz="CET"),
                "C": Timestamp("20130603", tz="UTC"),
            },
            index=range(5),
        ),
    }

    cat = {
        "int8": Categorical(list("abcdefg")),
        "int16": Categorical(np.arange(1000)),
        "int32": Categorical(np.arange(10000)),
    }

    timestamp = {
        "normal": Timestamp("2011-01-01"),
        "nat": NaT,
        "tz": Timestamp("2011-01-01", tz="US/Eastern"),
    }

    off = {
        "DateOffset": DateOffset(years=1),
        "DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824),
        "BusinessDay": BusinessDay(offset=timedelta(seconds=9)),
        "BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"),
        "CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"),
        "SemiMonthBegin": SemiMonthBegin(day_of_month=9),
        "SemiMonthEnd": SemiMonthEnd(day_of_month=24),
        "MonthBegin": MonthBegin(1),
        "MonthEnd": MonthEnd(1),
        "QuarterBegin": QuarterBegin(1),
        "QuarterEnd": QuarterEnd(1),
        "Day": Day(1),
        "YearBegin": YearBegin(1),
        "YearEnd": YearEnd(1),
        "Week": Week(1),
        "Week_Tues": Week(2, normalize=False, weekday=1),
        "WeekOfMonth": WeekOfMonth(week=3, weekday=4),
        "LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3),
        "FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
        "Easter": Easter(),
        "Hour": Hour(1),
        "Minute": Minute(1),
    }

    return {
        "series": series,
        "frame": frame,
        "index": index,
        "scalars": scalars,
        "mi": mi,
        "sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()},
        "sp_frame": {"float": _create_sp_frame()},
        "cat": cat,
        "timestamp": timestamp,
        "offsets": off,
    }


def platform_name():
    return "_".join(
        [
            str(pandas.__version__),
            str(pl.machine()),
            str(pl.system().lower()),
            str(pl.python_version()),
        ]
    )


def write_legacy_pickles(output_dir):
    version = pandas.__version__

    print(
        "This script generates a storage file for the current arch, system, "
        "and python version"
    )
    print(f" pandas version: {version}")
    print(f" output dir : {output_dir}")
    print(" storage format: pickle")

    pth = f"{platform_name()}.pickle"

    with open(os.path.join(output_dir, pth), "wb") as fh:
        pickle.dump(create_pickle_data(), fh, pickle.DEFAULT_PROTOCOL)

    print(f"created pickle file: {pth}")


def write_legacy_file():
    # force our cwd to be the first searched
    sys.path.insert(0, "")

    if not 3 <= len(sys.argv) <= 4:
        sys.exit(
            "Specify output directory and storage type: generate_legacy_"
            "storage_files.py <output_dir> <storage_type> "
        )

    output_dir = str(sys.argv[1])
    storage_type = str(sys.argv[2])

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    if storage_type == "pickle":
        write_legacy_pickles(output_dir=output_dir)
    else:
        sys.exit("storage_type must be one of {'pickle'}")


if __name__ == "__main__":
    write_legacy_file()
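
On the consuming side (see test_pickle.py), the generated artifact is an ordinary pickle of the nested dict returned by create_pickle_data(), so inspecting one is a two-liner. A sketch; the filename is the docstring's example, not a real checked-in file:

import pickle

with open("0.20.3_x86_64_darwin_3.5.2.pickle", "rb") as fh:
    data = pickle.load(fh)
print(sorted(data))  # top-level keys: 'cat', 'frame', 'index', 'mi', ...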
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_clipboard.py
ADDED
@@ -0,0 +1,423 @@
from textwrap import dedent

import numpy as np
import pytest

from pandas.errors import (
    PyperclipException,
    PyperclipWindowsException,
)

import pandas as pd
from pandas import (
    NA,
    DataFrame,
    Series,
    get_option,
    read_clipboard,
)
import pandas._testing as tm
from pandas.core.arrays import (
    ArrowStringArray,
    StringArray,
)

from pandas.io.clipboard import (
    CheckedCall,
    _stringifyText,
    init_qt_clipboard,
)


def build_kwargs(sep, excel):
    kwargs = {}
    if excel != "default":
        kwargs["excel"] = excel
    if sep != "default":
        kwargs["sep"] = sep
    return kwargs


@pytest.fixture(
    params=[
        "delims",
        "utf8",
        "utf16",
        "string",
        "long",
        "nonascii",
        "colwidth",
        "mixed",
        "float",
        "int",
    ]
)
def df(request):
    data_type = request.param

    if data_type == "delims":
        return DataFrame({"a": ['"a,\t"b|c', "d\tef`"], "b": ["hi'j", "k''lm"]})
    elif data_type == "utf8":
        return DataFrame({"a": ["µasd", "Ωœ∑`"], "b": ["øπ∆˚¬", "œ∑`®"]})
    elif data_type == "utf16":
        return DataFrame(
            {"a": ["\U0001f44d\U0001f44d", "\U0001f44d\U0001f44d"], "b": ["abc", "def"]}
        )
    elif data_type == "string":
        return DataFrame(
            np.array([f"i-{i}" for i in range(15)]).reshape(5, 3), columns=list("abc")
        )
    elif data_type == "long":
        max_rows = get_option("display.max_rows")
        return DataFrame(
            np.random.default_rng(2).integers(0, 10, size=(max_rows + 1, 3)),
            columns=list("abc"),
        )
    elif data_type == "nonascii":
        return DataFrame({"en": "in English".split(), "es": "en español".split()})
    elif data_type == "colwidth":
        _cw = get_option("display.max_colwidth") + 1
        return DataFrame(
            np.array(["x" * _cw for _ in range(15)]).reshape(5, 3), columns=list("abc")
        )
    elif data_type == "mixed":
        return DataFrame(
            {
                "a": np.arange(1.0, 6.0) + 0.01,
                "b": np.arange(1, 6).astype(np.int64),
                "c": list("abcde"),
            }
        )
    elif data_type == "float":
        return DataFrame(np.random.default_rng(2).random((5, 3)), columns=list("abc"))
    elif data_type == "int":
        return DataFrame(
            np.random.default_rng(2).integers(0, 10, (5, 3)), columns=list("abc")
        )
    else:
        raise ValueError


@pytest.fixture
def mock_ctypes(monkeypatch):
    """
    Mocks WinError to help with testing the clipboard.
    """

    def _mock_win_error():
        return "Window Error"

    # Set raising to False because WinError won't exist on non-windows platforms
    with monkeypatch.context() as m:
        m.setattr("ctypes.WinError", _mock_win_error, raising=False)
        yield


@pytest.mark.usefixtures("mock_ctypes")
def test_checked_call_with_bad_call(monkeypatch):
    """
    Give CheckedCall a function that returns a falsey value and
    mock get_errno so it returns a truthy errno so an exception is raised.
    """

    def _return_false():
        return False

    monkeypatch.setattr("pandas.io.clipboard.get_errno", lambda: True)
    msg = f"Error calling {_return_false.__name__} \\(Window Error\\)"

    with pytest.raises(PyperclipWindowsException, match=msg):
        CheckedCall(_return_false)()


@pytest.mark.usefixtures("mock_ctypes")
def test_checked_call_with_valid_call(monkeypatch):
    """
    Give CheckedCall a function that returns a truthy value and
    mock get_errno so it returns a falsey errno so an exception is not raised.
    The function should return the result from _return_true.
    """

    def _return_true():
        return True

    monkeypatch.setattr("pandas.io.clipboard.get_errno", lambda: False)

    # Give CheckedCall a callable that returns a truthy value
    checked_call = CheckedCall(_return_true)
    assert checked_call() is True


@pytest.mark.parametrize(
    "text",
    [
        "String_test",
        True,
        1,
        1.0,
        1j,
    ],
)
def test_stringify_text(text):
    valid_types = (str, int, float, bool)

    if isinstance(text, valid_types):
        result = _stringifyText(text)
        assert result == str(text)
    else:
        msg = (
            "only str, int, float, and bool values "
            f"can be copied to the clipboard, not {type(text).__name__}"
        )
        with pytest.raises(PyperclipException, match=msg):
            _stringifyText(text)


@pytest.fixture
def set_pyqt_clipboard(monkeypatch):
    qt_cut, qt_paste = init_qt_clipboard()
    with monkeypatch.context() as m:
        m.setattr(pd.io.clipboard, "clipboard_set", qt_cut)
        m.setattr(pd.io.clipboard, "clipboard_get", qt_paste)
        yield


@pytest.fixture
def clipboard(qapp):
    clip = qapp.clipboard()
    yield clip
    clip.clear()


@pytest.mark.single_cpu
@pytest.mark.clipboard
@pytest.mark.usefixtures("set_pyqt_clipboard")
@pytest.mark.usefixtures("clipboard")
class TestClipboard:
    # Test that default arguments copy as tab delimited
    # Test that explicit delimiters are respected
    @pytest.mark.parametrize("sep", [None, "\t", ",", "|"])
    @pytest.mark.parametrize("encoding", [None, "UTF-8", "utf-8", "utf8"])
    def test_round_trip_frame_sep(self, df, sep, encoding):
        df.to_clipboard(excel=None, sep=sep, encoding=encoding)
        result = read_clipboard(sep=sep or "\t", index_col=0, encoding=encoding)
        tm.assert_frame_equal(df, result)

    # Test white space separator
    def test_round_trip_frame_string(self, df):
        df.to_clipboard(excel=False, sep=None)
        result = read_clipboard()
        assert df.to_string() == result.to_string()
        assert df.shape == result.shape

    # Two character separator is not supported in to_clipboard
    # Test that multi-character separators are not silently passed
    def test_excel_sep_warning(self, df):
        with tm.assert_produces_warning(
            UserWarning,
            match="to_clipboard in excel mode requires a single character separator.",
            check_stacklevel=False,
        ):
            df.to_clipboard(excel=True, sep=r"\t")

    # Separator is ignored when excel=False and should produce a warning
    def test_copy_delim_warning(self, df):
        with tm.assert_produces_warning():
            df.to_clipboard(excel=False, sep="\t")

    # Tests that the default behavior of to_clipboard is tab
    # delimited and excel=True
    @pytest.mark.parametrize("sep", ["\t", None, "default"])
    @pytest.mark.parametrize("excel", [True, None, "default"])
    def test_clipboard_copy_tabs_default(self, sep, excel, df, clipboard):
        kwargs = build_kwargs(sep, excel)
        df.to_clipboard(**kwargs)
        assert clipboard.text() == df.to_csv(sep="\t")

    # Tests reading of white space separated tables
    @pytest.mark.parametrize("sep", [None, "default"])
    def test_clipboard_copy_strings(self, sep, df):
        kwargs = build_kwargs(sep, False)
        df.to_clipboard(**kwargs)
        result = read_clipboard(sep=r"\s+")
        assert result.to_string() == df.to_string()
        assert df.shape == result.shape

    def test_read_clipboard_infer_excel(self, clipboard):
        # gh-19010: avoid warnings
        clip_kwargs = {"engine": "python"}

        text = dedent(
            """
            John James\tCharlie Mingus
            1\t2
            4\tHarry Carney
            """.strip()
        )
        clipboard.setText(text)
        df = read_clipboard(**clip_kwargs)

        # excel data is parsed correctly
        assert df.iloc[1, 1] == "Harry Carney"

        # having diff tab counts doesn't trigger it
        text = dedent(
            """
            a\t b
            1 2
            3 4
            """.strip()
        )
        clipboard.setText(text)
        res = read_clipboard(**clip_kwargs)

        text = dedent(
            """
            a b
            1 2
            3 4
            """.strip()
        )
        clipboard.setText(text)
        exp = read_clipboard(**clip_kwargs)

        tm.assert_frame_equal(res, exp)

    def test_infer_excel_with_nulls(self, clipboard):
        # GH41108
        text = "col1\tcol2\n1\tred\n\tblue\n2\tgreen"

        clipboard.setText(text)
        df = read_clipboard()
        df_expected = DataFrame(
            data={"col1": [1, None, 2], "col2": ["red", "blue", "green"]}
        )

        # excel data is parsed correctly
        tm.assert_frame_equal(df, df_expected)

    @pytest.mark.parametrize(
        "multiindex",
        [
            (  # Can't use `dedent` here as it will remove the leading `\t`
                "\n".join(
                    [
                        "\t\t\tcol1\tcol2",
                        "A\t0\tTrue\t1\tred",
                        "A\t1\tTrue\t\tblue",
                        "B\t0\tFalse\t2\tgreen",
                    ]
                ),
                [["A", "A", "B"], [0, 1, 0], [True, True, False]],
            ),
            (
                "\n".join(
                    ["\t\tcol1\tcol2", "A\t0\t1\tred", "A\t1\t\tblue", "B\t0\t2\tgreen"]
                ),
                [["A", "A", "B"], [0, 1, 0]],
            ),
        ],
    )
    def test_infer_excel_with_multiindex(self, clipboard, multiindex):
        # GH41108

        clipboard.setText(multiindex[0])
        df = read_clipboard()
        df_expected = DataFrame(
            data={"col1": [1, None, 2], "col2": ["red", "blue", "green"]},
            index=multiindex[1],
        )

        # excel data is parsed correctly
        tm.assert_frame_equal(df, df_expected)

    def test_invalid_encoding(self, df):
        msg = "clipboard only supports utf-8 encoding"
        # test case for testing invalid encoding
        with pytest.raises(ValueError, match=msg):
            df.to_clipboard(encoding="ascii")
        with pytest.raises(NotImplementedError, match=msg):
            read_clipboard(encoding="ascii")

    @pytest.mark.parametrize("data", ["\U0001f44d...", "Ωœ∑`...", "abcd..."])
    def test_raw_roundtrip(self, data):
        # PR #25040 wide unicode wasn't copied correctly on PY3 on windows
        df = DataFrame({"data": [data]})
        df.to_clipboard()
        result = read_clipboard()
        tm.assert_frame_equal(df, result)

    @pytest.mark.parametrize("engine", ["c", "python"])
    def test_read_clipboard_dtype_backend(
        self, clipboard, string_storage, dtype_backend, engine
    ):
        # GH#50502
        if string_storage == "pyarrow" or dtype_backend == "pyarrow":
            pa = pytest.importorskip("pyarrow")

        if string_storage == "python":
            string_array = StringArray(np.array(["x", "y"], dtype=np.object_))
            string_array_na = StringArray(np.array(["x", NA], dtype=np.object_))

        elif dtype_backend == "pyarrow" and engine != "c":
            pa = pytest.importorskip("pyarrow")
            from pandas.arrays import ArrowExtensionArray

            string_array = ArrowExtensionArray(pa.array(["x", "y"]))
            string_array_na = ArrowExtensionArray(pa.array(["x", None]))

        else:
            string_array = ArrowStringArray(pa.array(["x", "y"]))
            string_array_na = ArrowStringArray(pa.array(["x", None]))

        text = """a,b,c,d,e,f,g,h,i
x,1,4.0,x,2,4.0,,True,False
y,2,5.0,,,,,False,"""
        clipboard.setText(text)

        with pd.option_context("mode.string_storage", string_storage):
            result = read_clipboard(sep=",", dtype_backend=dtype_backend, engine=engine)

        expected = DataFrame(
            {
                "a": string_array,
                "b": Series([1, 2], dtype="Int64"),
                "c": Series([4.0, 5.0], dtype="Float64"),
                "d": string_array_na,
                "e": Series([2, NA], dtype="Int64"),
                "f": Series([4.0, NA], dtype="Float64"),
                "g": Series([NA, NA], dtype="Int64"),
                "h": Series([True, False], dtype="boolean"),
                "i": Series([False, NA], dtype="boolean"),
            }
        )
        if dtype_backend == "pyarrow":
            from pandas.arrays import ArrowExtensionArray

            expected = DataFrame(
                {
                    col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
                    for col in expected.columns
                }
            )
            expected["g"] = ArrowExtensionArray(pa.array([None, None]))

        tm.assert_frame_equal(result, expected)

    def test_invalid_dtype_backend(self):
        msg = (
            "dtype_backend numpy is invalid, only 'numpy_nullable' and "
            "'pyarrow' are allowed."
        )
        with pytest.raises(ValueError, match=msg):
            read_clipboard(dtype_backend="numpy")

    def test_to_clipboard_pos_args_deprecation(self):
        # GH-54229
        df = DataFrame({"a": [1, 2, 3]})
        msg = (
            r"Starting with pandas version 3.0 all arguments of to_clipboard "
            r"will be keyword-only."
        )
        with tm.assert_produces_warning(FutureWarning, match=msg):
            df.to_clipboard(True, None)
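
The core round trip this class automates looks like the following from user code. It needs a real system clipboard (the suite substitutes a Qt clipboard via the set_pyqt_clipboard fixture above), so treat this as a sketch rather than something CI-safe:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
df.to_clipboard(excel=True, sep="\t")  # copy as tab-delimited text
result = pd.read_clipboard(sep="\t", index_col=0)
pd.testing.assert_frame_equal(df, result)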
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_common.py
ADDED
@@ -0,0 +1,650 @@
1 |
+
"""
|
2 |
+
Tests for the pandas.io.common functionalities
|
3 |
+
"""
|
4 |
+
import codecs
|
5 |
+
import errno
|
6 |
+
from functools import partial
|
7 |
+
from io import (
|
8 |
+
BytesIO,
|
9 |
+
StringIO,
|
10 |
+
UnsupportedOperation,
|
11 |
+
)
|
12 |
+
import mmap
|
13 |
+
import os
|
14 |
+
from pathlib import Path
|
15 |
+
import pickle
|
16 |
+
import tempfile
|
17 |
+
|
18 |
+
import numpy as np
|
19 |
+
import pytest
|
20 |
+
|
21 |
+
from pandas.compat import is_platform_windows
|
22 |
+
import pandas.util._test_decorators as td
|
23 |
+
|
24 |
+
import pandas as pd
|
25 |
+
import pandas._testing as tm
|
26 |
+
|
27 |
+
import pandas.io.common as icom
|
28 |
+
|
29 |
+
pytestmark = pytest.mark.filterwarnings(
|
30 |
+
"ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
|
31 |
+
)
|
32 |
+
|
33 |
+
|
34 |
+
class CustomFSPath:
|
35 |
+
"""For testing fspath on unknown objects"""
|
36 |
+
|
37 |
+
def __init__(self, path) -> None:
|
38 |
+
self.path = path
|
39 |
+
|
40 |
+
def __fspath__(self):
|
41 |
+
return self.path
|
42 |
+
|
43 |
+
|
44 |
+
# Functions that consume a string path and return a string or path-like object
|
45 |
+
path_types = [str, CustomFSPath, Path]
|
46 |
+
|
47 |
+
try:
|
48 |
+
from py.path import local as LocalPath
|
49 |
+
|
50 |
+
path_types.append(LocalPath)
|
51 |
+
except ImportError:
|
52 |
+
pass
|
53 |
+
|
54 |
+
HERE = os.path.abspath(os.path.dirname(__file__))


# https://github.com/cython/cython/issues/1720
class TestCommonIOCapabilities:
    data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""

    def test_expand_user(self):
        filename = "~/sometest"
        expanded_name = icom._expand_user(filename)

        assert expanded_name != filename
        assert os.path.isabs(expanded_name)
        assert os.path.expanduser(filename) == expanded_name

    def test_expand_user_normal_path(self):
        filename = "/somefolder/sometest"
        expanded_name = icom._expand_user(filename)

        assert expanded_name == filename
        assert os.path.expanduser(filename) == expanded_name

    def test_stringify_path_pathlib(self):
        rel_path = icom.stringify_path(Path("."))
        assert rel_path == "."
        redundant_path = icom.stringify_path(Path("foo//bar"))
        assert redundant_path == os.path.join("foo", "bar")

    @td.skip_if_no("py.path")
    def test_stringify_path_localpath(self):
        path = os.path.join("foo", "bar")
        abs_path = os.path.abspath(path)
        lpath = LocalPath(path)
        assert icom.stringify_path(lpath) == abs_path

    def test_stringify_path_fspath(self):
        p = CustomFSPath("foo/bar.csv")
        result = icom.stringify_path(p)
        assert result == "foo/bar.csv"

    def test_stringify_file_and_path_like(self):
        # GH 38125: do not stringify file objects that are also path-like
        fsspec = pytest.importorskip("fsspec")
        with tm.ensure_clean() as path:
            with fsspec.open(f"file://{path}", mode="wb") as fsspec_obj:
                assert fsspec_obj == icom.stringify_path(fsspec_obj)

    @pytest.mark.parametrize("path_type", path_types)
    def test_infer_compression_from_path(self, compression_format, path_type):
        extension, expected = compression_format
        path = path_type("foo/bar.csv" + extension)
        compression = icom.infer_compression(path, compression="infer")
        assert compression == expected

    @pytest.mark.parametrize("path_type", [str, CustomFSPath, Path])
    def test_get_handle_with_path(self, path_type):
        # ignore LocalPath: it creates strange paths: /absolute/~/sometest
        with tempfile.TemporaryDirectory(dir=Path.home()) as tmp:
            filename = path_type("~/" + Path(tmp).name + "/sometest")
            with icom.get_handle(filename, "w") as handles:
                assert Path(handles.handle.name).is_absolute()
                assert os.path.expanduser(filename) == handles.handle.name

    def test_get_handle_with_buffer(self):
        with StringIO() as input_buffer:
            with icom.get_handle(input_buffer, "r") as handles:
                assert handles.handle == input_buffer
            assert not input_buffer.closed
        assert input_buffer.closed

    # Test that BytesIOWrapper(get_handle) returns correct amount of bytes every time
    def test_bytesiowrapper_returns_correct_bytes(self):
        # Test latin1, ucs-2, and ucs-4 chars
        data = """a,b,c
1,2,3
©,®,®
Look,a snake,🐍"""
        with icom.get_handle(StringIO(data), "rb", is_text=False) as handles:
            result = b""
            chunksize = 5
            while True:
                chunk = handles.handle.read(chunksize)
                # Make sure each chunk is correct amount of bytes
                assert len(chunk) <= chunksize
                if len(chunk) < chunksize:
                    # Can be less amount of bytes, but only at EOF
                    # which happens when read returns empty
                    assert len(handles.handle.read()) == 0
                    result += chunk
                    break
                result += chunk
            assert result == data.encode("utf-8")

    # Test that pyarrow can handle a file opened with get_handle
    def test_get_handle_pyarrow_compat(self):
        pa_csv = pytest.importorskip("pyarrow.csv")

        # Test latin1, ucs-2, and ucs-4 chars
        data = """a,b,c
1,2,3
©,®,®
Look,a snake,🐍"""
        expected = pd.DataFrame(
            {"a": ["1", "©", "Look"], "b": ["2", "®", "a snake"], "c": ["3", "®", "🐍"]}
        )
        s = StringIO(data)
        with icom.get_handle(s, "rb", is_text=False) as handles:
            df = pa_csv.read_csv(handles.handle).to_pandas()
            tm.assert_frame_equal(df, expected)
            assert not s.closed

    def test_iterator(self):
        with pd.read_csv(StringIO(self.data1), chunksize=1) as reader:
            result = pd.concat(reader, ignore_index=True)
        expected = pd.read_csv(StringIO(self.data1))
        tm.assert_frame_equal(result, expected)

        # GH12153
        with pd.read_csv(StringIO(self.data1), chunksize=1) as it:
            first = next(it)
            tm.assert_frame_equal(first, expected.iloc[[0]])
            tm.assert_frame_equal(pd.concat(it), expected.iloc[1:])

    @pytest.mark.parametrize(
        "reader, module, error_class, fn_ext",
        [
            (pd.read_csv, "os", FileNotFoundError, "csv"),
            (pd.read_fwf, "os", FileNotFoundError, "txt"),
            (pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
            (pd.read_feather, "pyarrow", OSError, "feather"),
            (pd.read_hdf, "tables", FileNotFoundError, "h5"),
            (pd.read_stata, "os", FileNotFoundError, "dta"),
            (pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
            (pd.read_json, "os", FileNotFoundError, "json"),
            (pd.read_pickle, "os", FileNotFoundError, "pickle"),
        ],
    )
    def test_read_non_existent(self, reader, module, error_class, fn_ext):
        pytest.importorskip(module)

        path = os.path.join(HERE, "data", "does_not_exist." + fn_ext)
        msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
        msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
        msg3 = "Expected object or value"
        msg4 = "path_or_buf needs to be a string file path or file-like"
        msg5 = (
            rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
            rf"'.+does_not_exist\.{fn_ext}'"
        )
        msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
        msg7 = (
            rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
        )
        msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}"

        with pytest.raises(
            error_class,
            match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
        ):
            reader(path)

    @pytest.mark.parametrize(
        "method, module, error_class, fn_ext",
        [
            (pd.DataFrame.to_csv, "os", OSError, "csv"),
            (pd.DataFrame.to_html, "os", OSError, "html"),
            (pd.DataFrame.to_excel, "xlrd", OSError, "xlsx"),
            (pd.DataFrame.to_feather, "pyarrow", OSError, "feather"),
            (pd.DataFrame.to_parquet, "pyarrow", OSError, "parquet"),
            (pd.DataFrame.to_stata, "os", OSError, "dta"),
            (pd.DataFrame.to_json, "os", OSError, "json"),
            (pd.DataFrame.to_pickle, "os", OSError, "pickle"),
        ],
    )
    # NOTE: Missing parent directory for pd.DataFrame.to_hdf is handled by PyTables
    def test_write_missing_parent_directory(self, method, module, error_class, fn_ext):
        pytest.importorskip(module)

        dummy_frame = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]})

        path = os.path.join(HERE, "data", "missing_folder", "does_not_exist." + fn_ext)

        with pytest.raises(
            error_class,
            match=r"Cannot save file into a non-existent directory: .*missing_folder",
        ):
            method(dummy_frame, path)

    @pytest.mark.parametrize(
        "reader, module, error_class, fn_ext",
        [
            (pd.read_csv, "os", FileNotFoundError, "csv"),
            (pd.read_table, "os", FileNotFoundError, "csv"),
            (pd.read_fwf, "os", FileNotFoundError, "txt"),
            (pd.read_excel, "xlrd", FileNotFoundError, "xlsx"),
            (pd.read_feather, "pyarrow", OSError, "feather"),
            (pd.read_hdf, "tables", FileNotFoundError, "h5"),
            (pd.read_stata, "os", FileNotFoundError, "dta"),
            (pd.read_sas, "os", FileNotFoundError, "sas7bdat"),
            (pd.read_json, "os", FileNotFoundError, "json"),
            (pd.read_pickle, "os", FileNotFoundError, "pickle"),
        ],
    )
    def test_read_expands_user_home_dir(
        self, reader, module, error_class, fn_ext, monkeypatch
    ):
        pytest.importorskip(module)

        path = os.path.join("~", "does_not_exist." + fn_ext)
        monkeypatch.setattr(icom, "_expand_user", lambda x: os.path.join("foo", x))

        msg1 = rf"File (b')?.+does_not_exist\.{fn_ext}'? does not exist"
        msg2 = rf"\[Errno 2\] No such file or directory: '.+does_not_exist\.{fn_ext}'"
        msg3 = "Unexpected character found when decoding 'false'"
        msg4 = "path_or_buf needs to be a string file path or file-like"
        msg5 = (
            rf"\[Errno 2\] File .+does_not_exist\.{fn_ext} does not exist: "
            rf"'.+does_not_exist\.{fn_ext}'"
        )
        msg6 = rf"\[Errno 2\] 没有那个文件或目录: '.+does_not_exist\.{fn_ext}'"
        msg7 = (
            rf"\[Errno 2\] File o directory non esistente: '.+does_not_exist\.{fn_ext}'"
        )
        msg8 = rf"Failed to open local file.+does_not_exist\.{fn_ext}"

        with pytest.raises(
            error_class,
            match=rf"({msg1}|{msg2}|{msg3}|{msg4}|{msg5}|{msg6}|{msg7}|{msg8})",
        ):
            reader(path)

    @pytest.mark.parametrize(
        "reader, module, path",
        [
            (pd.read_csv, "os", ("io", "data", "csv", "iris.csv")),
            (pd.read_table, "os", ("io", "data", "csv", "iris.csv")),
            (
                pd.read_fwf,
                "os",
                ("io", "data", "fixed_width", "fixed_width_format.txt"),
            ),
            (pd.read_excel, "xlrd", ("io", "data", "excel", "test1.xlsx")),
            (
                pd.read_feather,
                "pyarrow",
                ("io", "data", "feather", "feather-0_3_1.feather"),
            ),
            (
                pd.read_hdf,
                "tables",
                ("io", "data", "legacy_hdf", "datetimetz_object.h5"),
            ),
            (pd.read_stata, "os", ("io", "data", "stata", "stata10_115.dta")),
            (pd.read_sas, "os", ("io", "sas", "data", "test1.sas7bdat")),
            (pd.read_json, "os", ("io", "json", "data", "tsframe_v012.json")),
            (
                pd.read_pickle,
                "os",
                ("io", "data", "pickle", "categorical.0.25.0.pickle"),
            ),
        ],
    )
    def test_read_fspath_all(self, reader, module, path, datapath):
        pytest.importorskip(module)
        path = datapath(*path)

        mypath = CustomFSPath(path)
        result = reader(mypath)
        expected = reader(path)

        if path.endswith(".pickle"):
            # categorical
            tm.assert_categorical_equal(result, expected)
        else:
            tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "writer_name, writer_kwargs, module",
        [
            ("to_csv", {}, "os"),
            ("to_excel", {"engine": "openpyxl"}, "openpyxl"),
            ("to_feather", {}, "pyarrow"),
            ("to_html", {}, "os"),
            ("to_json", {}, "os"),
            ("to_latex", {}, "os"),
            ("to_pickle", {}, "os"),
            ("to_stata", {"time_stamp": pd.to_datetime("2019-01-01 00:00")}, "os"),
        ],
    )
    def test_write_fspath_all(self, writer_name, writer_kwargs, module):
        if writer_name in ["to_latex"]:  # uses Styler implementation
            pytest.importorskip("jinja2")
        p1 = tm.ensure_clean("string")
        p2 = tm.ensure_clean("fspath")
        df = pd.DataFrame({"A": [1, 2]})

        with p1 as string, p2 as fspath:
            pytest.importorskip(module)
            mypath = CustomFSPath(fspath)
            writer = getattr(df, writer_name)

            writer(string, **writer_kwargs)
            writer(mypath, **writer_kwargs)
            with open(string, "rb") as f_str, open(fspath, "rb") as f_path:
                if writer_name == "to_excel":
                    # binary representation of excel contains time creation
                    # data that causes flaky CI failures
                    result = pd.read_excel(f_str, **writer_kwargs)
                    expected = pd.read_excel(f_path, **writer_kwargs)
                    tm.assert_frame_equal(result, expected)
                else:
                    result = f_str.read()
                    expected = f_path.read()
                    assert result == expected

    def test_write_fspath_hdf5(self):
        # Same test as write_fspath_all, except HDF5 files aren't
        # necessarily byte-for-byte identical for a given dataframe, so we'll
        # have to read and compare equality
        pytest.importorskip("tables")

        df = pd.DataFrame({"A": [1, 2]})
        p1 = tm.ensure_clean("string")
        p2 = tm.ensure_clean("fspath")

        with p1 as string, p2 as fspath:
            mypath = CustomFSPath(fspath)
            df.to_hdf(mypath, key="bar")
            df.to_hdf(string, key="bar")

            result = pd.read_hdf(fspath, key="bar")
            expected = pd.read_hdf(string, key="bar")

        tm.assert_frame_equal(result, expected)


@pytest.fixture
def mmap_file(datapath):
    return datapath("io", "data", "csv", "test_mmap.csv")


class TestMMapWrapper:
    def test_constructor_bad_file(self, mmap_file):
        non_file = StringIO("I am not a file")
        non_file.fileno = lambda: -1

        # the error raised is different on Windows
        if is_platform_windows():
            msg = "The parameter is incorrect"
            err = OSError
        else:
            msg = "[Errno 22]"
            err = mmap.error

        with pytest.raises(err, match=msg):
            icom._maybe_memory_map(non_file, True)

        with open(mmap_file, encoding="utf-8") as target:
            pass

        msg = "I/O operation on closed file"
        with pytest.raises(ValueError, match=msg):
            icom._maybe_memory_map(target, True)

    def test_next(self, mmap_file):
        with open(mmap_file, encoding="utf-8") as target:
            lines = target.readlines()

            with icom.get_handle(
                target, "r", is_text=True, memory_map=True
            ) as wrappers:
                wrapper = wrappers.handle
                assert isinstance(wrapper.buffer.buffer, mmap.mmap)

                for line in lines:
                    next_line = next(wrapper)
                    assert next_line.strip() == line.strip()

                with pytest.raises(StopIteration, match=r"^$"):
                    next(wrapper)

    def test_unknown_engine(self):
        with tm.ensure_clean() as path:
            df = pd.DataFrame(
                1.1 * np.arange(120).reshape((30, 4)),
                columns=pd.Index(list("ABCD"), dtype=object),
                index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
            )
            df.to_csv(path)
            with pytest.raises(ValueError, match="Unknown engine"):
                pd.read_csv(path, engine="pyt")

    def test_binary_mode(self):
        """
        'encoding' shouldn't be passed to 'open' in binary mode.

        GH 35058
        """
        with tm.ensure_clean() as path:
            df = pd.DataFrame(
                1.1 * np.arange(120).reshape((30, 4)),
                columns=pd.Index(list("ABCD"), dtype=object),
                index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
            )
            df.to_csv(path, mode="w+b")
            tm.assert_frame_equal(df, pd.read_csv(path, index_col=0))

    @pytest.mark.parametrize("encoding", ["utf-16", "utf-32"])
    @pytest.mark.parametrize("compression_", ["bz2", "xz"])
    def test_warning_missing_utf_bom(self, encoding, compression_):
        """
        bz2 and xz do not write the byte order mark (BOM) for utf-16/32.

        https://stackoverflow.com/questions/55171439

        GH 35681
        """
        df = pd.DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=pd.Index(list("ABCD"), dtype=object),
            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(UnicodeWarning):
                df.to_csv(path, compression=compression_, encoding=encoding)

            # reading should fail (otherwise we wouldn't need the warning)
            msg = r"UTF-\d+ stream does not start with BOM"
            with pytest.raises(UnicodeError, match=msg):
                pd.read_csv(path, compression=compression_, encoding=encoding)


def test_is_fsspec_url():
    assert icom.is_fsspec_url("gcs://pandas/somethingelse.com")
    assert icom.is_fsspec_url("gs://pandas/somethingelse.com")
    # the following is the only remote URL that is handled without fsspec
    assert not icom.is_fsspec_url("http://pandas/somethingelse.com")
    assert not icom.is_fsspec_url("random:pandas/somethingelse.com")
    assert not icom.is_fsspec_url("/local/path")
    assert not icom.is_fsspec_url("relative/local/path")
    # fsspec URL in string should not be recognized
    assert not icom.is_fsspec_url("this is not fsspec://url")
    assert not icom.is_fsspec_url("{'url': 'gs://pandas/somethingelse.com'}")
    # accept everything that conforms to RFC 3986 schema
    assert icom.is_fsspec_url("RFC-3986+compliant.spec://something")


@pytest.mark.parametrize("encoding", [None, "utf-8"])
@pytest.mark.parametrize("format", ["csv", "json"])
def test_codecs_encoding(encoding, format):
    # GH39247
    expected = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD"), dtype=object),
        index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    with tm.ensure_clean() as path:
        with codecs.open(path, mode="w", encoding=encoding) as handle:
            getattr(expected, f"to_{format}")(handle)
        with codecs.open(path, mode="r", encoding=encoding) as handle:
            if format == "csv":
                df = pd.read_csv(handle, index_col=0)
            else:
                df = pd.read_json(handle)
    tm.assert_frame_equal(expected, df)


def test_codecs_get_writer_reader():
    # GH39247
    expected = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD"), dtype=object),
        index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    with tm.ensure_clean() as path:
        with open(path, "wb") as handle:
            with codecs.getwriter("utf-8")(handle) as encoded:
                expected.to_csv(encoded)
        with open(path, "rb") as handle:
            with codecs.getreader("utf-8")(handle) as encoded:
                df = pd.read_csv(encoded, index_col=0)
    tm.assert_frame_equal(expected, df)


@pytest.mark.parametrize(
    "io_class,mode,msg",
    [
        (BytesIO, "t", "a bytes-like object is required, not 'str'"),
        (StringIO, "b", "string argument expected, got 'bytes'"),
    ],
)
def test_explicit_encoding(io_class, mode, msg):
    # GH39247; this test makes sure that if a user provides mode="*t" or "*b",
    # it is used. In the case of this test it leads to an error as intentionally the
    # wrong mode is requested
    expected = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD"), dtype=object),
        index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    with io_class() as buffer:
        with pytest.raises(TypeError, match=msg):
            expected.to_csv(buffer, mode=f"w{mode}")


@pytest.mark.parametrize("encoding_errors", [None, "strict", "replace"])
@pytest.mark.parametrize("format", ["csv", "json"])
def test_encoding_errors(encoding_errors, format):
    # GH39450
    msg = "'utf-8' codec can't decode byte"
    bad_encoding = b"\xe4"

    if format == "csv":
        content = b"," + bad_encoding + b"\n" + bad_encoding * 2 + b"," + bad_encoding
        reader = partial(pd.read_csv, index_col=0)
    else:
        content = (
            b'{"'
            + bad_encoding * 2
            + b'": {"'
            + bad_encoding
            + b'":"'
            + bad_encoding
            + b'"}}'
        )
        reader = partial(pd.read_json, orient="index")
    with tm.ensure_clean() as path:
        file = Path(path)
        file.write_bytes(content)

        if encoding_errors != "replace":
            with pytest.raises(UnicodeDecodeError, match=msg):
                reader(path, encoding_errors=encoding_errors)
        else:
            df = reader(path, encoding_errors=encoding_errors)
            decoded = bad_encoding.decode(errors=encoding_errors)
            expected = pd.DataFrame({decoded: [decoded]}, index=[decoded * 2])
            tm.assert_frame_equal(df, expected)


def test_bad_encdoing_errors():
    # GH 39777
    with tm.ensure_clean() as path:
        with pytest.raises(LookupError, match="unknown error handler name"):
            icom.get_handle(path, "w", errors="bad")


def test_errno_attribute():
    # GH 13872
    with pytest.raises(FileNotFoundError, match="\\[Errno 2\\]") as err:
        pd.read_csv("doesnt_exist")
    assert err.errno == errno.ENOENT


def test_fail_mmap():
    with pytest.raises(UnsupportedOperation, match="fileno"):
        with BytesIO() as buffer:
            icom.get_handle(buffer, "rb", memory_map=True)


def test_close_on_error():
    # GH 47136
    class TestError:
        def close(self):
            raise OSError("test")

    with pytest.raises(OSError, match="test"):
        with BytesIO() as buffer:
            with icom.get_handle(buffer, "rb") as handles:
                handles.created_handles.append(TestError())


@pytest.mark.parametrize(
    "reader",
    [
        pd.read_csv,
        pd.read_fwf,
        pd.read_excel,
        pd.read_feather,
        pd.read_hdf,
        pd.read_stata,
        pd.read_sas,
        pd.read_json,
        pd.read_pickle,
    ],
)
def test_pickle_reader(reader):
    # GH 22265
    with BytesIO() as buffer:
        pickle.dump(reader, buffer)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_compression.py
ADDED
@@ -0,0 +1,378 @@
import gzip
import io
import os
from pathlib import Path
import subprocess
import sys
import tarfile
import textwrap
import time
import zipfile

import numpy as np
import pytest

from pandas.compat import is_platform_windows

import pandas as pd
import pandas._testing as tm

import pandas.io.common as icom


@pytest.mark.parametrize(
    "obj",
    [
        pd.DataFrame(
            100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            columns=["X", "Y", "Z"],
        ),
        pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
    ],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_compression_size(obj, method, compression_only):
    if compression_only == "tar":
        compression_only = {"method": "tar", "mode": "w:gz"}

    with tm.ensure_clean() as path:
        getattr(obj, method)(path, compression=compression_only)
        compressed_size = os.path.getsize(path)
        getattr(obj, method)(path, compression=None)
        uncompressed_size = os.path.getsize(path)
        assert uncompressed_size > compressed_size


@pytest.mark.parametrize(
    "obj",
    [
        pd.DataFrame(
            100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            columns=["X", "Y", "Z"],
        ),
        pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
    ],
)
@pytest.mark.parametrize("method", ["to_csv", "to_json"])
def test_compression_size_fh(obj, method, compression_only):
    with tm.ensure_clean() as path:
        with icom.get_handle(
            path,
            "w:gz" if compression_only == "tar" else "w",
            compression=compression_only,
        ) as handles:
            getattr(obj, method)(handles.handle)
            assert not handles.handle.closed
        compressed_size = os.path.getsize(path)
    with tm.ensure_clean() as path:
        with icom.get_handle(path, "w", compression=None) as handles:
            getattr(obj, method)(handles.handle)
            assert not handles.handle.closed
        uncompressed_size = os.path.getsize(path)
        assert uncompressed_size > compressed_size


@pytest.mark.parametrize(
    "write_method, write_kwargs, read_method",
    [
        ("to_csv", {"index": False}, pd.read_csv),
        ("to_json", {}, pd.read_json),
        ("to_pickle", {}, pd.read_pickle),
    ],
)
def test_dataframe_compression_defaults_to_infer(
    write_method, write_kwargs, read_method, compression_only, compression_to_extension
):
    # GH22004
    input = pd.DataFrame([[1.0, 0, -4], [3.4, 5, 2]], columns=["X", "Y", "Z"])
    extension = compression_to_extension[compression_only]
    with tm.ensure_clean("compressed" + extension) as path:
        getattr(input, write_method)(path, **write_kwargs)
        output = read_method(path, compression=compression_only)
    tm.assert_frame_equal(output, input)


@pytest.mark.parametrize(
    "write_method,write_kwargs,read_method,read_kwargs",
    [
        ("to_csv", {"index": False, "header": True}, pd.read_csv, {"squeeze": True}),
        ("to_json", {}, pd.read_json, {"typ": "series"}),
        ("to_pickle", {}, pd.read_pickle, {}),
    ],
)
def test_series_compression_defaults_to_infer(
    write_method,
    write_kwargs,
    read_method,
    read_kwargs,
    compression_only,
    compression_to_extension,
):
    # GH22004
    input = pd.Series([0, 5, -2, 10], name="X")
    extension = compression_to_extension[compression_only]
    with tm.ensure_clean("compressed" + extension) as path:
        getattr(input, write_method)(path, **write_kwargs)
        if "squeeze" in read_kwargs:
            kwargs = read_kwargs.copy()
            del kwargs["squeeze"]
            output = read_method(path, compression=compression_only, **kwargs).squeeze(
                "columns"
            )
        else:
            output = read_method(path, compression=compression_only, **read_kwargs)
    tm.assert_series_equal(output, input, check_names=False)


def test_compression_warning(compression_only):
    # Assert that passing a file object to to_csv while explicitly specifying a
    # compression protocol triggers a RuntimeWarning, as per GH21227.
    df = pd.DataFrame(
        100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
        columns=["X", "Y", "Z"],
    )
    with tm.ensure_clean() as path:
        with icom.get_handle(path, "w", compression=compression_only) as handles:
            with tm.assert_produces_warning(RuntimeWarning):
                df.to_csv(handles.handle, compression=compression_only)


def test_compression_binary(compression_only):
    """
    Binary file handles support compression.

    GH22555
    """
    df = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD"), dtype=object),
        index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
    )

    # with a file
    with tm.ensure_clean() as path:
        with open(path, mode="wb") as file:
            df.to_csv(file, mode="wb", compression=compression_only)
            file.seek(0)  # file shouldn't be closed
        tm.assert_frame_equal(
            df, pd.read_csv(path, index_col=0, compression=compression_only)
        )

    # with BytesIO
    file = io.BytesIO()
    df.to_csv(file, mode="wb", compression=compression_only)
    file.seek(0)  # file shouldn't be closed
    tm.assert_frame_equal(
        df, pd.read_csv(file, index_col=0, compression=compression_only)
    )


def test_gzip_reproducibility_file_name():
    """
    Gzip should create reproducible archives with mtime.

    Note: Archives created with different filenames will still be different!

    GH 28103
    """
    df = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD"), dtype=object),
        index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    compression_options = {"method": "gzip", "mtime": 1}

    # test for filename
    with tm.ensure_clean() as path:
        path = Path(path)
        df.to_csv(path, compression=compression_options)
        time.sleep(0.1)
        output = path.read_bytes()
        df.to_csv(path, compression=compression_options)
        assert output == path.read_bytes()


def test_gzip_reproducibility_file_object():
    """
    Gzip should create reproducible archives with mtime.

    GH 28103
    """
    df = pd.DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=pd.Index(list("ABCD"), dtype=object),
        index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    compression_options = {"method": "gzip", "mtime": 1}

    # test for file object
    buffer = io.BytesIO()
    df.to_csv(buffer, compression=compression_options, mode="wb")
    output = buffer.getvalue()
    time.sleep(0.1)
    buffer = io.BytesIO()
    df.to_csv(buffer, compression=compression_options, mode="wb")
    assert output == buffer.getvalue()


@pytest.mark.single_cpu
def test_with_missing_lzma():
    """Tests if import pandas works when lzma is not present."""
    # https://github.com/pandas-dev/pandas/issues/27575
    code = textwrap.dedent(
        """\
        import sys
        sys.modules['lzma'] = None
        import pandas
        """
    )
    subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE)


@pytest.mark.single_cpu
def test_with_missing_lzma_runtime():
    """Tests if RuntimeError is hit when calling lzma without
    having the module available.
    """
    code = textwrap.dedent(
        """
        import sys
        import pytest
        sys.modules['lzma'] = None
        import pandas as pd
        df = pd.DataFrame()
        with pytest.raises(RuntimeError, match='lzma module'):
            df.to_csv('foo.csv', compression='xz')
        """
    )
    subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE)


@pytest.mark.parametrize(
    "obj",
    [
        pd.DataFrame(
            100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            columns=["X", "Y", "Z"],
        ),
        pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
    ],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_gzip_compression_level(obj, method):
    # GH33196
    with tm.ensure_clean() as path:
        getattr(obj, method)(path, compression="gzip")
        compressed_size_default = os.path.getsize(path)
        getattr(obj, method)(path, compression={"method": "gzip", "compresslevel": 1})
        compressed_size_fast = os.path.getsize(path)
        assert compressed_size_default < compressed_size_fast


@pytest.mark.parametrize(
    "obj",
    [
        pd.DataFrame(
            100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            columns=["X", "Y", "Z"],
        ),
        pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
    ],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_xz_compression_level_read(obj, method):
    with tm.ensure_clean() as path:
        getattr(obj, method)(path, compression="xz")
        compressed_size_default = os.path.getsize(path)
        getattr(obj, method)(path, compression={"method": "xz", "preset": 1})
        compressed_size_fast = os.path.getsize(path)
        assert compressed_size_default < compressed_size_fast
        if method == "to_csv":
            pd.read_csv(path, compression="xz")


@pytest.mark.parametrize(
    "obj",
    [
        pd.DataFrame(
            100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
            columns=["X", "Y", "Z"],
        ),
        pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
    ],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_bzip_compression_level(obj, method):
    """GH33196 bzip needs file size > 100k to show a size difference between
    compression levels, so here we just check if the call works when
    compression is passed as a dict.
    """
    with tm.ensure_clean() as path:
        getattr(obj, method)(path, compression={"method": "bz2", "compresslevel": 1})


@pytest.mark.parametrize(
    "suffix,archive",
    [
        (".zip", zipfile.ZipFile),
        (".tar", tarfile.TarFile),
    ],
)
def test_empty_archive_zip(suffix, archive):
    with tm.ensure_clean(filename=suffix) as path:
        with archive(path, "w"):
            pass
        with pytest.raises(ValueError, match="Zero files found"):
            pd.read_csv(path)


def test_ambiguous_archive_zip():
    with tm.ensure_clean(filename=".zip") as path:
        with zipfile.ZipFile(path, "w") as file:
            file.writestr("a.csv", "foo,bar")
            file.writestr("b.csv", "foo,bar")
        with pytest.raises(ValueError, match="Multiple files found in ZIP file"):
            pd.read_csv(path)


def test_ambiguous_archive_tar(tmp_path):
    csvAPath = tmp_path / "a.csv"
    with open(csvAPath, "w", encoding="utf-8") as a:
        a.write("foo,bar\n")
    csvBPath = tmp_path / "b.csv"
    with open(csvBPath, "w", encoding="utf-8") as b:
        b.write("foo,bar\n")

    tarpath = tmp_path / "archive.tar"
    with tarfile.TarFile(tarpath, "w") as tar:
        tar.add(csvAPath, "a.csv")
        tar.add(csvBPath, "b.csv")

    with pytest.raises(ValueError, match="Multiple files found in TAR archive"):
        pd.read_csv(tarpath)


def test_tar_gz_to_different_filename():
    with tm.ensure_clean(filename=".foo") as file:
        pd.DataFrame(
            [["1", "2"]],
            columns=["foo", "bar"],
        ).to_csv(file, compression={"method": "tar", "mode": "w:gz"}, index=False)
        with gzip.open(file) as uncompressed:
            with tarfile.TarFile(fileobj=uncompressed) as archive:
                members = archive.getmembers()
                assert len(members) == 1
                content = archive.extractfile(members[0]).read().decode("utf8")

                if is_platform_windows():
                    expected = "foo,bar\r\n1,2\r\n"
                else:
                    expected = "foo,bar\n1,2\n"

                assert content == expected


def test_tar_no_error_on_close():
    with io.BytesIO() as buffer:
        with icom._BytesTarFile(fileobj=buffer, mode="w"):
            pass
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_feather.py
ADDED
@@ -0,0 +1,252 @@
""" test feather-format compat """
import numpy as np
import pytest

import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import (
    ArrowStringArray,
    StringArray,
)

from pandas.io.feather_format import read_feather, to_feather  # isort:skip

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)

pa = pytest.importorskip("pyarrow")


@pytest.mark.single_cpu
class TestFeather:
    def check_error_on_write(self, df, exc, err_msg):
        # check that we are raising the exception
        # on writing

        with pytest.raises(exc, match=err_msg):
            with tm.ensure_clean() as path:
                to_feather(df, path)

    def check_external_error_on_write(self, df):
        # check that we are raising the exception
        # on writing

        with tm.external_error_raised(Exception):
            with tm.ensure_clean() as path:
                to_feather(df, path)

    def check_round_trip(self, df, expected=None, write_kwargs={}, **read_kwargs):
        if expected is None:
            expected = df.copy()

        with tm.ensure_clean() as path:
            to_feather(df, path, **write_kwargs)

            result = read_feather(path, **read_kwargs)

            tm.assert_frame_equal(result, expected)

    def test_error(self):
        msg = "feather only support IO with DataFrames"
        for obj in [
            pd.Series([1, 2, 3]),
            1,
            "foo",
            pd.Timestamp("20130101"),
            np.array([1, 2, 3]),
        ]:
            self.check_error_on_write(obj, ValueError, msg)

    def test_basic(self):
        df = pd.DataFrame(
            {
                "string": list("abc"),
                "int": list(range(1, 4)),
                "uint": np.arange(3, 6).astype("u1"),
                "float": np.arange(4.0, 7.0, dtype="float64"),
                "float_with_null": [1.0, np.nan, 3],
                "bool": [True, False, True],
                "bool_with_null": [True, np.nan, False],
                "cat": pd.Categorical(list("abc")),
                "dt": pd.DatetimeIndex(
                    list(pd.date_range("20130101", periods=3)), freq=None
                ),
                "dttz": pd.DatetimeIndex(
                    list(pd.date_range("20130101", periods=3, tz="US/Eastern")),
                    freq=None,
                ),
                "dt_with_null": [
                    pd.Timestamp("20130101"),
                    pd.NaT,
                    pd.Timestamp("20130103"),
                ],
                "dtns": pd.DatetimeIndex(
                    list(pd.date_range("20130101", periods=3, freq="ns")), freq=None
                ),
            }
        )
        df["periods"] = pd.period_range("2013", freq="M", periods=3)
        df["timedeltas"] = pd.timedelta_range("1 day", periods=3)
        df["intervals"] = pd.interval_range(0, 3, 3)

        assert df.dttz.dtype.tz.zone == "US/Eastern"

        expected = df.copy()
        expected.loc[1, "bool_with_null"] = None
        self.check_round_trip(df, expected=expected)

    def test_duplicate_columns(self):
        # https://github.com/wesm/feather/issues/53
        # not currently able to handle duplicate columns
        df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
        self.check_external_error_on_write(df)

    def test_read_columns(self):
        # GH 24025
        df = pd.DataFrame(
            {
                "col1": list("abc"),
                "col2": list(range(1, 4)),
                "col3": list("xyz"),
                "col4": list(range(4, 7)),
            }
        )
        columns = ["col1", "col3"]
        self.check_round_trip(df, expected=df[columns], columns=columns)

    def test_read_columns_different_order(self):
        # GH 33878
        df = pd.DataFrame({"A": [1, 2], "B": ["x", "y"], "C": [True, False]})
        expected = df[["B", "A"]]
        self.check_round_trip(df, expected, columns=["B", "A"])

    def test_unsupported_other(self):
        # mixed python objects
        df = pd.DataFrame({"a": ["a", 1, 2.0]})
        self.check_external_error_on_write(df)

    def test_rw_use_threads(self):
        df = pd.DataFrame({"A": np.arange(100000)})
        self.check_round_trip(df, use_threads=True)
        self.check_round_trip(df, use_threads=False)

    def test_path_pathlib(self):
        df = pd.DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=pd.Index(list("ABCD"), dtype=object),
            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
        ).reset_index()
        result = tm.round_trip_pathlib(df.to_feather, read_feather)
        tm.assert_frame_equal(df, result)

    def test_path_localpath(self):
        df = pd.DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=pd.Index(list("ABCD"), dtype=object),
            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
        ).reset_index()
        result = tm.round_trip_localpath(df.to_feather, read_feather)
        tm.assert_frame_equal(df, result)

    def test_passthrough_keywords(self):
        df = pd.DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=pd.Index(list("ABCD"), dtype=object),
            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
        ).reset_index()
        self.check_round_trip(df, write_kwargs={"version": 1})

    @pytest.mark.network
    @pytest.mark.single_cpu
    def test_http_path(self, feather_file, httpserver):
        # GH 29055
        expected = read_feather(feather_file)
        with open(feather_file, "rb") as f:
            httpserver.serve_content(content=f.read())
            res = read_feather(httpserver.url)
        tm.assert_frame_equal(expected, res)

    def test_read_feather_dtype_backend(self, string_storage, dtype_backend):
        # GH#50765
        df = pd.DataFrame(
            {
                "a": pd.Series([1, np.nan, 3], dtype="Int64"),
                "b": pd.Series([1, 2, 3], dtype="Int64"),
                "c": pd.Series([1.5, np.nan, 2.5], dtype="Float64"),
                "d": pd.Series([1.5, 2.0, 2.5], dtype="Float64"),
                "e": [True, False, None],
                "f": [True, False, True],
                "g": ["a", "b", "c"],
                "h": ["a", "b", None],
            }
        )

        if string_storage == "python":
            string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
            string_array_na = StringArray(np.array(["a", "b", pd.NA], dtype=np.object_))

        elif dtype_backend == "pyarrow":
            from pandas.arrays import ArrowExtensionArray

            string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
            string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))

        else:
            string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
            string_array_na = ArrowStringArray(pa.array(["a", "b", None]))

        with tm.ensure_clean() as path:
            to_feather(df, path)
            with pd.option_context("mode.string_storage", string_storage):
                result = read_feather(path, dtype_backend=dtype_backend)

        expected = pd.DataFrame(
            {
                "a": pd.Series([1, np.nan, 3], dtype="Int64"),
                "b": pd.Series([1, 2, 3], dtype="Int64"),
                "c": pd.Series([1.5, np.nan, 2.5], dtype="Float64"),
                "d": pd.Series([1.5, 2.0, 2.5], dtype="Float64"),
                "e": pd.Series([True, False, pd.NA], dtype="boolean"),
                "f": pd.Series([True, False, True], dtype="boolean"),
                "g": string_array,
                "h": string_array_na,
            }
        )

        if dtype_backend == "pyarrow":
            from pandas.arrays import ArrowExtensionArray

            expected = pd.DataFrame(
                {
                    col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
                    for col in expected.columns
                }
            )

        tm.assert_frame_equal(result, expected)

    def test_int_columns_and_index(self):
        df = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index([3, 4, 5], name="test"))
        self.check_round_trip(df)

    def test_invalid_dtype_backend(self):
        msg = (
            "dtype_backend numpy is invalid, only 'numpy_nullable' and "
            "'pyarrow' are allowed."
        )
        df = pd.DataFrame({"int": list(range(1, 4))})
        with tm.ensure_clean("tmp.feather") as path:
            df.to_feather(path)
        with pytest.raises(ValueError, match=msg):
            read_feather(path, dtype_backend="numpy")

    def test_string_inference(self, tmp_path):
        # GH#54431
        path = tmp_path / "test_string_inference.p"
        df = pd.DataFrame(data={"a": ["x", "y"]})
        df.to_feather(path)
        with pd.option_context("future.infer_string", True):
            result = read_feather(path)
        expected = pd.DataFrame(data={"a": ["x", "y"]}, dtype="string[pyarrow_numpy]")
        tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_fsspec.py
ADDED
@@ -0,0 +1,345 @@
import io

import numpy as np
import pytest

from pandas import (
    DataFrame,
    date_range,
    read_csv,
    read_excel,
    read_feather,
    read_json,
    read_parquet,
    read_pickle,
    read_stata,
    read_table,
)
import pandas._testing as tm
from pandas.util import _test_decorators as td

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)


@pytest.fixture
def fsspectest():
    pytest.importorskip("fsspec")
    from fsspec import register_implementation
    from fsspec.implementations.memory import MemoryFileSystem
    from fsspec.registry import _registry as registry

    class TestMemoryFS(MemoryFileSystem):
        protocol = "testmem"
        test = [None]

        def __init__(self, **kwargs) -> None:
            self.test[0] = kwargs.pop("test", None)
            super().__init__(**kwargs)

    register_implementation("testmem", TestMemoryFS, clobber=True)
    yield TestMemoryFS()
    registry.pop("testmem", None)
    TestMemoryFS.test[0] = None
    TestMemoryFS.store.clear()


@pytest.fixture
def df1():
    return DataFrame(
        {
            "int": [1, 3],
            "float": [2.0, np.nan],
            "str": ["t", "s"],
            "dt": date_range("2018-06-18", periods=2),
        }
    )


@pytest.fixture
def cleared_fs():
    fsspec = pytest.importorskip("fsspec")

    memfs = fsspec.filesystem("memory")
    yield memfs
    memfs.store.clear()


def test_read_csv(cleared_fs, df1):
    text = str(df1.to_csv(index=False)).encode()
    with cleared_fs.open("test/test.csv", "wb") as w:
        w.write(text)
    df2 = read_csv("memory://test/test.csv", parse_dates=["dt"])

    tm.assert_frame_equal(df1, df2)


def test_reasonable_error(monkeypatch, cleared_fs):
    from fsspec.registry import known_implementations

    with pytest.raises(ValueError, match="nosuchprotocol"):
        read_csv("nosuchprotocol://test/test.csv")
    err_msg = "test error message"
    monkeypatch.setitem(
        known_implementations,
        "couldexist",
        {"class": "unimportable.CouldExist", "err": err_msg},
    )
    with pytest.raises(ImportError, match=err_msg):
        read_csv("couldexist://test/test.csv")


def test_to_csv(cleared_fs, df1):
    df1.to_csv("memory://test/test.csv", index=True)

    df2 = read_csv("memory://test/test.csv", parse_dates=["dt"], index_col=0)

    tm.assert_frame_equal(df1, df2)


def test_to_excel(cleared_fs, df1):
    pytest.importorskip("openpyxl")
    ext = "xlsx"
    path = f"memory://test/test.{ext}"
    df1.to_excel(path, index=True)

    df2 = read_excel(path, parse_dates=["dt"], index_col=0)

    tm.assert_frame_equal(df1, df2)


@pytest.mark.parametrize("binary_mode", [False, True])
def test_to_csv_fsspec_object(cleared_fs, binary_mode, df1):
    fsspec = pytest.importorskip("fsspec")

    path = "memory://test/test.csv"
    mode = "wb" if binary_mode else "w"
    with fsspec.open(path, mode=mode).open() as fsspec_object:
        df1.to_csv(fsspec_object, index=True)
        assert not fsspec_object.closed

    mode = mode.replace("w", "r")
    with fsspec.open(path, mode=mode) as fsspec_object:
        df2 = read_csv(
            fsspec_object,
            parse_dates=["dt"],
            index_col=0,
        )
        assert not fsspec_object.closed

    tm.assert_frame_equal(df1, df2)


def test_csv_options(fsspectest):
    df = DataFrame({"a": [0]})
    df.to_csv(
        "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False
    )
    assert fsspectest.test[0] == "csv_write"
    read_csv("testmem://test/test.csv", storage_options={"test": "csv_read"})
    assert fsspectest.test[0] == "csv_read"


def test_read_table_options(fsspectest):
    # GH #39167
    df = DataFrame({"a": [0]})
    df.to_csv(
        "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False
    )
    assert fsspectest.test[0] == "csv_write"
    read_table("testmem://test/test.csv", storage_options={"test": "csv_read"})
    assert fsspectest.test[0] == "csv_read"


def test_excel_options(fsspectest):
    pytest.importorskip("openpyxl")
    extension = "xlsx"

    df = DataFrame({"a": [0]})

    path = f"testmem://test/test.{extension}"

    df.to_excel(path, storage_options={"test": "write"}, index=False)
    assert fsspectest.test[0] == "write"
    read_excel(path, storage_options={"test": "read"})
    assert fsspectest.test[0] == "read"


def test_to_parquet_new_file(cleared_fs, df1):
    """Regression test for writing to a not-yet-existent GCS Parquet file."""
    pytest.importorskip("fastparquet")

    df1.to_parquet(
        "memory://test/test.csv", index=True, engine="fastparquet", compression=None
    )


def test_arrowparquet_options(fsspectest):
    """Regression test for writing to a not-yet-existent GCS Parquet file."""
    pytest.importorskip("pyarrow")
    df = DataFrame({"a": [0]})
    df.to_parquet(
        "testmem://test/test.csv",
        engine="pyarrow",
        compression=None,
        storage_options={"test": "parquet_write"},
    )
    assert fsspectest.test[0] == "parquet_write"
    read_parquet(
        "testmem://test/test.csv",
        engine="pyarrow",
        storage_options={"test": "parquet_read"},
    )
    assert fsspectest.test[0] == "parquet_read"


@td.skip_array_manager_not_yet_implemented  # TODO(ArrayManager) fastparquet
def test_fastparquet_options(fsspectest):
    """Regression test for writing to a not-yet-existent GCS Parquet file."""
    pytest.importorskip("fastparquet")

    df = DataFrame({"a": [0]})
    df.to_parquet(
        "testmem://test/test.csv",
        engine="fastparquet",
        compression=None,
        storage_options={"test": "parquet_write"},
    )
    assert fsspectest.test[0] == "parquet_write"
    read_parquet(
        "testmem://test/test.csv",
        engine="fastparquet",
        storage_options={"test": "parquet_read"},
    )
    assert fsspectest.test[0] == "parquet_read"


@pytest.mark.single_cpu
def test_from_s3_csv(s3_public_bucket_with_data, tips_file, s3so):
    pytest.importorskip("s3fs")
    tm.assert_equal(
        read_csv(
            f"s3://{s3_public_bucket_with_data.name}/tips.csv", storage_options=s3so
        ),
        read_csv(tips_file),
    )
    # the following are decompressed by pandas, not fsspec
    tm.assert_equal(
        read_csv(
            f"s3://{s3_public_bucket_with_data.name}/tips.csv.gz", storage_options=s3so
        ),
        read_csv(tips_file),
    )
    tm.assert_equal(
        read_csv(
            f"s3://{s3_public_bucket_with_data.name}/tips.csv.bz2", storage_options=s3so
        ),
        read_csv(tips_file),
    )


@pytest.mark.single_cpu
@pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"])
def test_s3_protocols(s3_public_bucket_with_data, tips_file, protocol, s3so):
    pytest.importorskip("s3fs")
    tm.assert_equal(
        read_csv(
            f"{protocol}://{s3_public_bucket_with_data.name}/tips.csv",
            storage_options=s3so,
        ),
        read_csv(tips_file),
    )


@pytest.mark.single_cpu
@td.skip_array_manager_not_yet_implemented  # TODO(ArrayManager) fastparquet
def test_s3_parquet(s3_public_bucket, s3so, df1):
    pytest.importorskip("fastparquet")
    pytest.importorskip("s3fs")

    fn = f"s3://{s3_public_bucket.name}/test.parquet"
    df1.to_parquet(
        fn, index=False, engine="fastparquet", compression=None, storage_options=s3so
    )
    df2 = read_parquet(fn, engine="fastparquet", storage_options=s3so)
    tm.assert_equal(df1, df2)


@td.skip_if_installed("fsspec")
def test_not_present_exception():
    msg = "Missing optional dependency 'fsspec'|fsspec library is required"
    with pytest.raises(ImportError, match=msg):
        read_csv("memory://test/test.csv")


def test_feather_options(fsspectest):
    pytest.importorskip("pyarrow")
    df = DataFrame({"a": [0]})
    df.to_feather("testmem://mockfile", storage_options={"test": "feather_write"})
    assert fsspectest.test[0] == "feather_write"
    out = read_feather("testmem://mockfile", storage_options={"test": "feather_read"})
    assert fsspectest.test[0] == "feather_read"
    tm.assert_frame_equal(df, out)


def test_pickle_options(fsspectest):
    df = DataFrame({"a": [0]})
    df.to_pickle("testmem://mockfile", storage_options={"test": "pickle_write"})
    assert fsspectest.test[0] == "pickle_write"
    out = read_pickle("testmem://mockfile", storage_options={"test": "pickle_read"})
    assert fsspectest.test[0] == "pickle_read"
    tm.assert_frame_equal(df, out)


def test_json_options(fsspectest, compression):
    df = DataFrame({"a": [0]})
    df.to_json(
        "testmem://mockfile",
        compression=compression,
        storage_options={"test": "json_write"},
    )
    assert fsspectest.test[0] == "json_write"
    out = read_json(
        "testmem://mockfile",
        compression=compression,
        storage_options={"test": "json_read"},
    )
    assert fsspectest.test[0] == "json_read"
    tm.assert_frame_equal(df, out)


def test_stata_options(fsspectest):
    df = DataFrame({"a": [0]})
    df.to_stata(
        "testmem://mockfile", storage_options={"test": "stata_write"}, write_index=False
    )
    assert fsspectest.test[0] == "stata_write"
    out = read_stata("testmem://mockfile", storage_options={"test": "stata_read"})
    assert fsspectest.test[0] == "stata_read"
    tm.assert_frame_equal(df, out.astype("int64"))


def test_markdown_options(fsspectest):
    pytest.importorskip("tabulate")
    df = DataFrame({"a": [0]})
    df.to_markdown("testmem://mockfile", storage_options={"test": "md_write"})
    assert fsspectest.test[0] == "md_write"
    assert fsspectest.cat("testmem://mockfile")


def test_non_fsspec_options():
    pytest.importorskip("pyarrow")
    with pytest.raises(ValueError, match="storage_options"):
        read_csv("localfile", storage_options={"a": True})
    with pytest.raises(ValueError, match="storage_options"):
        # separate test for parquet, which has a different code path
        read_parquet("localfile", storage_options={"a": True})
    by = io.BytesIO()

    with pytest.raises(ValueError, match="storage_options"):
        read_csv(by, storage_options={"a": True})

    df = DataFrame({"a": [0]})
    with pytest.raises(ValueError, match="storage_options"):
        df.to_parquet("nonfsspecpath", storage_options={"a": True})
|
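
Note: every test above leans on the `fsspectest` fixture defined earlier in test_fsspec.py. A minimal sketch of the pattern such a fixture can use (assuming only fsspec's public `register_implementation` and `MemoryFileSystem` APIs; the real fixture may differ in detail): an in-memory filesystem registered under the "testmem" protocol that records the `test` value handed to it via `storage_options`, which is what the `fsspectest.test[0]` asserts read back.

import fsspec
from fsspec.implementations.memory import MemoryFileSystem


class RecordingMemoryFS(MemoryFileSystem):
    protocol = "testmem"
    test = [None]  # shared slot that the asserts above inspect

    def __init__(self, **kwargs):
        # storage_options passed to read_*/to_* arrive here as constructor kwargs
        self.test[0] = kwargs.pop("test", None)
        super().__init__(**kwargs)


fsspec.register_implementation("testmem", RecordingMemoryFS, clobber=True)
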
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gbq.py
ADDED
@@ -0,0 +1,14 @@
import pandas as pd
import pandas._testing as tm


def test_read_gbq_deprecated():
    with tm.assert_produces_warning(FutureWarning):
        with tm.external_error_raised(Exception):
            pd.read_gbq("fake")


def test_to_gbq_deprecated():
    with tm.assert_produces_warning(FutureWarning):
        with tm.external_error_raised(Exception):
            pd.DataFrame(range(1)).to_gbq("fake")
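
For readers unfamiliar with the pandas testing helpers: each check above pairs a deprecation warning with an environment-dependent error from the missing pandas-gbq backend. A rough equivalent using only pytest primitives (a sketch; the pandas helpers additionally validate warning message and stack level):

import pytest
import pandas as pd


def test_read_gbq_warns_then_raises_sketch():
    # read_gbq is deprecated, so a FutureWarning fires before the call errors out
    with pytest.warns(FutureWarning):
        with pytest.raises(Exception):
            pd.read_gbq("fake")
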
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_gcs.py
ADDED
@@ -0,0 +1,219 @@
from io import BytesIO
import os
import pathlib
import tarfile
import zipfile

import numpy as np
import pytest

from pandas import (
    DataFrame,
    Index,
    date_range,
    read_csv,
    read_excel,
    read_json,
    read_parquet,
)
import pandas._testing as tm
from pandas.util import _test_decorators as td

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)


@pytest.fixture
def gcs_buffer():
    """Emulate GCS using a binary buffer."""
    pytest.importorskip("gcsfs")
    fsspec = pytest.importorskip("fsspec")

    gcs_buffer = BytesIO()
    gcs_buffer.close = lambda: True

    class MockGCSFileSystem(fsspec.AbstractFileSystem):
        @staticmethod
        def open(*args, **kwargs):
            gcs_buffer.seek(0)
            return gcs_buffer

        def ls(self, path, **kwargs):
            # needed for pyarrow
            return [{"name": path, "type": "file"}]

    # Overwrites the default implementation from gcsfs to our mock class
    fsspec.register_implementation("gs", MockGCSFileSystem, clobber=True)

    return gcs_buffer


# Patches pyarrow; other processes should not pick up change
@pytest.mark.single_cpu
@pytest.mark.parametrize("format", ["csv", "json", "parquet", "excel", "markdown"])
def test_to_read_gcs(gcs_buffer, format, monkeypatch, capsys):
    """
    Test that many to/read functions support GCS.

    GH 33987
    """

    df1 = DataFrame(
        {
            "int": [1, 3],
            "float": [2.0, np.nan],
            "str": ["t", "s"],
            "dt": date_range("2018-06-18", periods=2),
        }
    )

    path = f"gs://test/test.{format}"

    if format == "csv":
        df1.to_csv(path, index=True)
        df2 = read_csv(path, parse_dates=["dt"], index_col=0)
    elif format == "excel":
        path = "gs://test/test.xlsx"
        df1.to_excel(path)
        df2 = read_excel(path, parse_dates=["dt"], index_col=0)
    elif format == "json":
        df1.to_json(path)
        df2 = read_json(path, convert_dates=["dt"])
    elif format == "parquet":
        pytest.importorskip("pyarrow")
        pa_fs = pytest.importorskip("pyarrow.fs")

        class MockFileSystem(pa_fs.FileSystem):
            @staticmethod
            def from_uri(path):
                print("Using pyarrow filesystem")
                to_local = pathlib.Path(path.replace("gs://", "")).absolute().as_uri()
                return pa_fs.LocalFileSystem(to_local)

        with monkeypatch.context() as m:
            m.setattr(pa_fs, "FileSystem", MockFileSystem)
            df1.to_parquet(path)
            df2 = read_parquet(path)
        captured = capsys.readouterr()
        assert captured.out == "Using pyarrow filesystem\nUsing pyarrow filesystem\n"
    elif format == "markdown":
        pytest.importorskip("tabulate")
        df1.to_markdown(path)
        df2 = df1

    tm.assert_frame_equal(df1, df2)


def assert_equal_zip_safe(result: bytes, expected: bytes, compression: str):
    """
    For zip compression, only compare the CRC-32 checksum of the file contents
    to avoid checking the time-dependent last-modified timestamp which
    in some CI builds is off-by-one

    See https://en.wikipedia.org/wiki/ZIP_(file_format)#File_headers
    """
    if compression == "zip":
        # Only compare the CRC checksum of the file contents
        with zipfile.ZipFile(BytesIO(result)) as exp, zipfile.ZipFile(
            BytesIO(expected)
        ) as res:
            for res_info, exp_info in zip(res.infolist(), exp.infolist()):
                assert res_info.CRC == exp_info.CRC
    elif compression == "tar":
        with tarfile.open(fileobj=BytesIO(result)) as tar_exp, tarfile.open(
            fileobj=BytesIO(expected)
        ) as tar_res:
            for tar_res_info, tar_exp_info in zip(
                tar_res.getmembers(), tar_exp.getmembers()
            ):
                actual_file = tar_res.extractfile(tar_res_info)
                expected_file = tar_exp.extractfile(tar_exp_info)
                assert (actual_file is None) == (expected_file is None)
                if actual_file is not None and expected_file is not None:
                    assert actual_file.read() == expected_file.read()
    else:
        assert result == expected


@pytest.mark.parametrize("encoding", ["utf-8", "cp1251"])
def test_to_csv_compression_encoding_gcs(
    gcs_buffer, compression_only, encoding, compression_to_extension
):
    """
    Compression and encoding should work with GCS.

    GH 35677 (to_csv, compression), GH 26124 (to_csv, encoding), and
    GH 32392 (read_csv, encoding)
    """
    df = DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=Index([f"i-{i}" for i in range(30)], dtype=object),
    )

    # reference of compressed and encoded file
    compression = {"method": compression_only}
    if compression_only == "gzip":
        compression["mtime"] = 1  # be reproducible
    buffer = BytesIO()
    df.to_csv(buffer, compression=compression, encoding=encoding, mode="wb")

    # write compressed file with explicit compression
    path_gcs = "gs://test/test.csv"
    df.to_csv(path_gcs, compression=compression, encoding=encoding)
    res = gcs_buffer.getvalue()
    expected = buffer.getvalue()
    assert_equal_zip_safe(res, expected, compression_only)

    read_df = read_csv(
        path_gcs, index_col=0, compression=compression_only, encoding=encoding
    )
    tm.assert_frame_equal(df, read_df)

    # write compressed file with implicit compression
    file_ext = compression_to_extension[compression_only]
    compression["method"] = "infer"
    path_gcs += f".{file_ext}"
    df.to_csv(path_gcs, compression=compression, encoding=encoding)

    res = gcs_buffer.getvalue()
    expected = buffer.getvalue()
    assert_equal_zip_safe(res, expected, compression_only)

    read_df = read_csv(path_gcs, index_col=0, compression="infer", encoding=encoding)
    tm.assert_frame_equal(df, read_df)


def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
    """Regression test for writing to a not-yet-existent GCS Parquet file."""
    pytest.importorskip("fastparquet")
    pytest.importorskip("gcsfs")

    from fsspec import AbstractFileSystem

    df1 = DataFrame(
        {
            "int": [1, 3],
            "float": [2.0, np.nan],
            "str": ["t", "s"],
            "dt": date_range("2018-06-18", periods=2),
        }
    )

    class MockGCSFileSystem(AbstractFileSystem):
        def open(self, path, mode="r", *args):
            if "w" not in mode:
                raise FileNotFoundError
            return open(os.path.join(tmpdir, "test.parquet"), mode, encoding="utf-8")

    monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
    df1.to_parquet(
        "gs://test/test.csv", index=True, engine="fastparquet", compression=None
    )


@td.skip_if_installed("gcsfs")
def test_gcs_not_present_exception():
    with tm.external_error_raised(ImportError):
        read_csv("gs://test/test.csv")
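
The `assert_equal_zip_safe` helper above exists because a zip archive's local file header embeds a last-modified timestamp, so a byte-for-byte comparison of two archives written moments apart can fail spuriously. A standalone sketch of the CRC-based comparison it performs for zip payloads, using only the standard library:

import zipfile
from io import BytesIO


def member_crcs(payload: bytes) -> list[int]:
    # CRC-32 of each member's decompressed contents, ignoring header metadata
    with zipfile.ZipFile(BytesIO(payload)) as zf:
        return [info.CRC for info in zf.infolist()]


def zip_contents_equal(a: bytes, b: bytes) -> bool:
    return member_crcs(a) == member_crcs(b)
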
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_html.py
ADDED
@@ -0,0 +1,1657 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections.abc import Iterator
|
2 |
+
from functools import partial
|
3 |
+
from io import (
|
4 |
+
BytesIO,
|
5 |
+
StringIO,
|
6 |
+
)
|
7 |
+
import os
|
8 |
+
from pathlib import Path
|
9 |
+
import re
|
10 |
+
import threading
|
11 |
+
from urllib.error import URLError
|
12 |
+
|
13 |
+
import numpy as np
|
14 |
+
import pytest
|
15 |
+
|
16 |
+
from pandas.compat import is_platform_windows
|
17 |
+
import pandas.util._test_decorators as td
|
18 |
+
|
19 |
+
import pandas as pd
|
20 |
+
from pandas import (
|
21 |
+
NA,
|
22 |
+
DataFrame,
|
23 |
+
MultiIndex,
|
24 |
+
Series,
|
25 |
+
Timestamp,
|
26 |
+
date_range,
|
27 |
+
read_csv,
|
28 |
+
read_html,
|
29 |
+
to_datetime,
|
30 |
+
)
|
31 |
+
import pandas._testing as tm
|
32 |
+
from pandas.core.arrays import (
|
33 |
+
ArrowStringArray,
|
34 |
+
StringArray,
|
35 |
+
)
|
36 |
+
|
37 |
+
from pandas.io.common import file_path_to_url
|
38 |
+
|
39 |
+
|
40 |
+
@pytest.fixture(
|
41 |
+
params=[
|
42 |
+
"chinese_utf-16.html",
|
43 |
+
"chinese_utf-32.html",
|
44 |
+
"chinese_utf-8.html",
|
45 |
+
"letz_latin1.html",
|
46 |
+
]
|
47 |
+
)
|
48 |
+
def html_encoding_file(request, datapath):
|
49 |
+
"""Parametrized fixture for HTML encoding test filenames."""
|
50 |
+
return datapath("io", "data", "html_encoding", request.param)
|
51 |
+
|
52 |
+
|
53 |
+
def assert_framelist_equal(list1, list2, *args, **kwargs):
|
54 |
+
assert len(list1) == len(list2), (
|
55 |
+
"lists are not of equal size "
|
56 |
+
f"len(list1) == {len(list1)}, "
|
57 |
+
f"len(list2) == {len(list2)}"
|
58 |
+
)
|
59 |
+
msg = "not all list elements are DataFrames"
|
60 |
+
both_frames = all(
|
61 |
+
map(
|
62 |
+
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
|
63 |
+
list1,
|
64 |
+
list2,
|
65 |
+
)
|
66 |
+
)
|
67 |
+
assert both_frames, msg
|
68 |
+
for frame_i, frame_j in zip(list1, list2):
|
69 |
+
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
|
70 |
+
assert not frame_i.empty, "frames are both empty"
|
71 |
+
|
72 |
+
|
73 |
+
def test_bs4_version_fails(monkeypatch, datapath):
|
74 |
+
bs4 = pytest.importorskip("bs4")
|
75 |
+
pytest.importorskip("html5lib")
|
76 |
+
|
77 |
+
monkeypatch.setattr(bs4, "__version__", "4.2")
|
78 |
+
with pytest.raises(ImportError, match="Pandas requires version"):
|
79 |
+
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
|
80 |
+
|
81 |
+
|
82 |
+
def test_invalid_flavor():
|
83 |
+
url = "google.com"
|
84 |
+
flavor = "invalid flavor"
|
85 |
+
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
|
86 |
+
|
87 |
+
with pytest.raises(ValueError, match=msg):
|
88 |
+
read_html(StringIO(url), match="google", flavor=flavor)
|
89 |
+
|
90 |
+
|
91 |
+
def test_same_ordering(datapath):
|
92 |
+
pytest.importorskip("bs4")
|
93 |
+
pytest.importorskip("lxml")
|
94 |
+
pytest.importorskip("html5lib")
|
95 |
+
|
96 |
+
filename = datapath("io", "data", "html", "valid_markup.html")
|
97 |
+
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
|
98 |
+
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
|
99 |
+
assert_framelist_equal(dfs_lxml, dfs_bs4)
|
100 |
+
|
101 |
+
|
102 |
+
@pytest.fixture(
|
103 |
+
params=[
|
104 |
+
pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]),
|
105 |
+
pytest.param("lxml", marks=td.skip_if_no("lxml")),
|
106 |
+
],
|
107 |
+
)
|
108 |
+
def flavor_read_html(request):
|
109 |
+
return partial(read_html, flavor=request.param)
|
110 |
+
|
111 |
+
|
112 |
+
class TestReadHtml:
|
113 |
+
def test_literal_html_deprecation(self, flavor_read_html):
|
114 |
+
# GH 53785
|
115 |
+
msg = (
|
116 |
+
"Passing literal html to 'read_html' is deprecated and "
|
117 |
+
"will be removed in a future version. To read from a "
|
118 |
+
"literal string, wrap it in a 'StringIO' object."
|
119 |
+
)
|
120 |
+
|
121 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
122 |
+
flavor_read_html(
|
123 |
+
"""<table>
|
124 |
+
<thead>
|
125 |
+
<tr>
|
126 |
+
<th>A</th>
|
127 |
+
<th>B</th>
|
128 |
+
</tr>
|
129 |
+
</thead>
|
130 |
+
<tbody>
|
131 |
+
<tr>
|
132 |
+
<td>1</td>
|
133 |
+
<td>2</td>
|
134 |
+
</tr>
|
135 |
+
</tbody>
|
136 |
+
<tbody>
|
137 |
+
<tr>
|
138 |
+
<td>3</td>
|
139 |
+
<td>4</td>
|
140 |
+
</tr>
|
141 |
+
</tbody>
|
142 |
+
</table>"""
|
143 |
+
)
|
144 |
+
|
145 |
+
@pytest.fixture
|
146 |
+
def spam_data(self, datapath):
|
147 |
+
return datapath("io", "data", "html", "spam.html")
|
148 |
+
|
149 |
+
@pytest.fixture
|
150 |
+
def banklist_data(self, datapath):
|
151 |
+
return datapath("io", "data", "html", "banklist.html")
|
152 |
+
|
153 |
+
def test_to_html_compat(self, flavor_read_html):
|
154 |
+
df = (
|
155 |
+
DataFrame(
|
156 |
+
np.random.default_rng(2).random((4, 3)),
|
157 |
+
columns=pd.Index(list("abc"), dtype=object),
|
158 |
+
)
|
159 |
+
# pylint: disable-next=consider-using-f-string
|
160 |
+
.map("{:.3f}".format).astype(float)
|
161 |
+
)
|
162 |
+
out = df.to_html()
|
163 |
+
res = flavor_read_html(
|
164 |
+
StringIO(out), attrs={"class": "dataframe"}, index_col=0
|
165 |
+
)[0]
|
166 |
+
tm.assert_frame_equal(res, df)
|
167 |
+
|
168 |
+
def test_dtype_backend(self, string_storage, dtype_backend, flavor_read_html):
|
169 |
+
# GH#50286
|
170 |
+
df = DataFrame(
|
171 |
+
{
|
172 |
+
"a": Series([1, np.nan, 3], dtype="Int64"),
|
173 |
+
"b": Series([1, 2, 3], dtype="Int64"),
|
174 |
+
"c": Series([1.5, np.nan, 2.5], dtype="Float64"),
|
175 |
+
"d": Series([1.5, 2.0, 2.5], dtype="Float64"),
|
176 |
+
"e": [True, False, None],
|
177 |
+
"f": [True, False, True],
|
178 |
+
"g": ["a", "b", "c"],
|
179 |
+
"h": ["a", "b", None],
|
180 |
+
}
|
181 |
+
)
|
182 |
+
|
183 |
+
if string_storage == "python":
|
184 |
+
string_array = StringArray(np.array(["a", "b", "c"], dtype=np.object_))
|
185 |
+
string_array_na = StringArray(np.array(["a", "b", NA], dtype=np.object_))
|
186 |
+
elif dtype_backend == "pyarrow":
|
187 |
+
pa = pytest.importorskip("pyarrow")
|
188 |
+
from pandas.arrays import ArrowExtensionArray
|
189 |
+
|
190 |
+
string_array = ArrowExtensionArray(pa.array(["a", "b", "c"]))
|
191 |
+
string_array_na = ArrowExtensionArray(pa.array(["a", "b", None]))
|
192 |
+
else:
|
193 |
+
pa = pytest.importorskip("pyarrow")
|
194 |
+
string_array = ArrowStringArray(pa.array(["a", "b", "c"]))
|
195 |
+
string_array_na = ArrowStringArray(pa.array(["a", "b", None]))
|
196 |
+
|
197 |
+
out = df.to_html(index=False)
|
198 |
+
with pd.option_context("mode.string_storage", string_storage):
|
199 |
+
result = flavor_read_html(StringIO(out), dtype_backend=dtype_backend)[0]
|
200 |
+
|
201 |
+
expected = DataFrame(
|
202 |
+
{
|
203 |
+
"a": Series([1, np.nan, 3], dtype="Int64"),
|
204 |
+
"b": Series([1, 2, 3], dtype="Int64"),
|
205 |
+
"c": Series([1.5, np.nan, 2.5], dtype="Float64"),
|
206 |
+
"d": Series([1.5, 2.0, 2.5], dtype="Float64"),
|
207 |
+
"e": Series([True, False, NA], dtype="boolean"),
|
208 |
+
"f": Series([True, False, True], dtype="boolean"),
|
209 |
+
"g": string_array,
|
210 |
+
"h": string_array_na,
|
211 |
+
}
|
212 |
+
)
|
213 |
+
|
214 |
+
if dtype_backend == "pyarrow":
|
215 |
+
import pyarrow as pa
|
216 |
+
|
217 |
+
from pandas.arrays import ArrowExtensionArray
|
218 |
+
|
219 |
+
expected = DataFrame(
|
220 |
+
{
|
221 |
+
col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
|
222 |
+
for col in expected.columns
|
223 |
+
}
|
224 |
+
)
|
225 |
+
|
226 |
+
tm.assert_frame_equal(result, expected)
|
227 |
+
|
228 |
+
@pytest.mark.network
|
229 |
+
@pytest.mark.single_cpu
|
230 |
+
def test_banklist_url(self, httpserver, banklist_data, flavor_read_html):
|
231 |
+
with open(banklist_data, encoding="utf-8") as f:
|
232 |
+
httpserver.serve_content(content=f.read())
|
233 |
+
df1 = flavor_read_html(
|
234 |
+
# lxml cannot find attrs leave out for now
|
235 |
+
httpserver.url,
|
236 |
+
match="First Federal Bank of Florida", # attrs={"class": "dataTable"}
|
237 |
+
)
|
238 |
+
# lxml cannot find attrs leave out for now
|
239 |
+
df2 = flavor_read_html(
|
240 |
+
httpserver.url,
|
241 |
+
match="Metcalf Bank",
|
242 |
+
) # attrs={"class": "dataTable"})
|
243 |
+
|
244 |
+
assert_framelist_equal(df1, df2)
|
245 |
+
|
246 |
+
@pytest.mark.network
|
247 |
+
@pytest.mark.single_cpu
|
248 |
+
def test_spam_url(self, httpserver, spam_data, flavor_read_html):
|
249 |
+
with open(spam_data, encoding="utf-8") as f:
|
250 |
+
httpserver.serve_content(content=f.read())
|
251 |
+
df1 = flavor_read_html(httpserver.url, match=".*Water.*")
|
252 |
+
df2 = flavor_read_html(httpserver.url, match="Unit")
|
253 |
+
|
254 |
+
assert_framelist_equal(df1, df2)
|
255 |
+
|
256 |
+
@pytest.mark.slow
|
257 |
+
def test_banklist(self, banklist_data, flavor_read_html):
|
258 |
+
df1 = flavor_read_html(
|
259 |
+
banklist_data, match=".*Florida.*", attrs={"id": "table"}
|
260 |
+
)
|
261 |
+
df2 = flavor_read_html(
|
262 |
+
banklist_data, match="Metcalf Bank", attrs={"id": "table"}
|
263 |
+
)
|
264 |
+
|
265 |
+
assert_framelist_equal(df1, df2)
|
266 |
+
|
267 |
+
def test_spam(self, spam_data, flavor_read_html):
|
268 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*")
|
269 |
+
df2 = flavor_read_html(spam_data, match="Unit")
|
270 |
+
assert_framelist_equal(df1, df2)
|
271 |
+
|
272 |
+
assert df1[0].iloc[0, 0] == "Proximates"
|
273 |
+
assert df1[0].columns[0] == "Nutrient"
|
274 |
+
|
275 |
+
def test_spam_no_match(self, spam_data, flavor_read_html):
|
276 |
+
dfs = flavor_read_html(spam_data)
|
277 |
+
for df in dfs:
|
278 |
+
assert isinstance(df, DataFrame)
|
279 |
+
|
280 |
+
def test_banklist_no_match(self, banklist_data, flavor_read_html):
|
281 |
+
dfs = flavor_read_html(banklist_data, attrs={"id": "table"})
|
282 |
+
for df in dfs:
|
283 |
+
assert isinstance(df, DataFrame)
|
284 |
+
|
285 |
+
def test_spam_header(self, spam_data, flavor_read_html):
|
286 |
+
df = flavor_read_html(spam_data, match=".*Water.*", header=2)[0]
|
287 |
+
assert df.columns[0] == "Proximates"
|
288 |
+
assert not df.empty
|
289 |
+
|
290 |
+
def test_skiprows_int(self, spam_data, flavor_read_html):
|
291 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=1)
|
292 |
+
df2 = flavor_read_html(spam_data, match="Unit", skiprows=1)
|
293 |
+
|
294 |
+
assert_framelist_equal(df1, df2)
|
295 |
+
|
296 |
+
def test_skiprows_range(self, spam_data, flavor_read_html):
|
297 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=range(2))
|
298 |
+
df2 = flavor_read_html(spam_data, match="Unit", skiprows=range(2))
|
299 |
+
|
300 |
+
assert_framelist_equal(df1, df2)
|
301 |
+
|
302 |
+
def test_skiprows_list(self, spam_data, flavor_read_html):
|
303 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=[1, 2])
|
304 |
+
df2 = flavor_read_html(spam_data, match="Unit", skiprows=[2, 1])
|
305 |
+
|
306 |
+
assert_framelist_equal(df1, df2)
|
307 |
+
|
308 |
+
def test_skiprows_set(self, spam_data, flavor_read_html):
|
309 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows={1, 2})
|
310 |
+
df2 = flavor_read_html(spam_data, match="Unit", skiprows={2, 1})
|
311 |
+
|
312 |
+
assert_framelist_equal(df1, df2)
|
313 |
+
|
314 |
+
def test_skiprows_slice(self, spam_data, flavor_read_html):
|
315 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=1)
|
316 |
+
df2 = flavor_read_html(spam_data, match="Unit", skiprows=1)
|
317 |
+
|
318 |
+
assert_framelist_equal(df1, df2)
|
319 |
+
|
320 |
+
def test_skiprows_slice_short(self, spam_data, flavor_read_html):
|
321 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=slice(2))
|
322 |
+
df2 = flavor_read_html(spam_data, match="Unit", skiprows=slice(2))
|
323 |
+
|
324 |
+
assert_framelist_equal(df1, df2)
|
325 |
+
|
326 |
+
def test_skiprows_slice_long(self, spam_data, flavor_read_html):
|
327 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=slice(2, 5))
|
328 |
+
df2 = flavor_read_html(spam_data, match="Unit", skiprows=slice(4, 1, -1))
|
329 |
+
|
330 |
+
assert_framelist_equal(df1, df2)
|
331 |
+
|
332 |
+
def test_skiprows_ndarray(self, spam_data, flavor_read_html):
|
333 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", skiprows=np.arange(2))
|
334 |
+
df2 = flavor_read_html(spam_data, match="Unit", skiprows=np.arange(2))
|
335 |
+
|
336 |
+
assert_framelist_equal(df1, df2)
|
337 |
+
|
338 |
+
def test_skiprows_invalid(self, spam_data, flavor_read_html):
|
339 |
+
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
|
340 |
+
flavor_read_html(spam_data, match=".*Water.*", skiprows="asdf")
|
341 |
+
|
342 |
+
def test_index(self, spam_data, flavor_read_html):
|
343 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", index_col=0)
|
344 |
+
df2 = flavor_read_html(spam_data, match="Unit", index_col=0)
|
345 |
+
assert_framelist_equal(df1, df2)
|
346 |
+
|
347 |
+
def test_header_and_index_no_types(self, spam_data, flavor_read_html):
|
348 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", header=1, index_col=0)
|
349 |
+
df2 = flavor_read_html(spam_data, match="Unit", header=1, index_col=0)
|
350 |
+
assert_framelist_equal(df1, df2)
|
351 |
+
|
352 |
+
def test_header_and_index_with_types(self, spam_data, flavor_read_html):
|
353 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", header=1, index_col=0)
|
354 |
+
df2 = flavor_read_html(spam_data, match="Unit", header=1, index_col=0)
|
355 |
+
assert_framelist_equal(df1, df2)
|
356 |
+
|
357 |
+
def test_infer_types(self, spam_data, flavor_read_html):
|
358 |
+
# 10892 infer_types removed
|
359 |
+
df1 = flavor_read_html(spam_data, match=".*Water.*", index_col=0)
|
360 |
+
df2 = flavor_read_html(spam_data, match="Unit", index_col=0)
|
361 |
+
assert_framelist_equal(df1, df2)
|
362 |
+
|
363 |
+
def test_string_io(self, spam_data, flavor_read_html):
|
364 |
+
with open(spam_data, encoding="UTF-8") as f:
|
365 |
+
data1 = StringIO(f.read())
|
366 |
+
|
367 |
+
with open(spam_data, encoding="UTF-8") as f:
|
368 |
+
data2 = StringIO(f.read())
|
369 |
+
|
370 |
+
df1 = flavor_read_html(data1, match=".*Water.*")
|
371 |
+
df2 = flavor_read_html(data2, match="Unit")
|
372 |
+
assert_framelist_equal(df1, df2)
|
373 |
+
|
374 |
+
def test_string(self, spam_data, flavor_read_html):
|
375 |
+
with open(spam_data, encoding="UTF-8") as f:
|
376 |
+
data = f.read()
|
377 |
+
|
378 |
+
df1 = flavor_read_html(StringIO(data), match=".*Water.*")
|
379 |
+
df2 = flavor_read_html(StringIO(data), match="Unit")
|
380 |
+
|
381 |
+
assert_framelist_equal(df1, df2)
|
382 |
+
|
383 |
+
def test_file_like(self, spam_data, flavor_read_html):
|
384 |
+
with open(spam_data, encoding="UTF-8") as f:
|
385 |
+
df1 = flavor_read_html(f, match=".*Water.*")
|
386 |
+
|
387 |
+
with open(spam_data, encoding="UTF-8") as f:
|
388 |
+
df2 = flavor_read_html(f, match="Unit")
|
389 |
+
|
390 |
+
assert_framelist_equal(df1, df2)
|
391 |
+
|
392 |
+
@pytest.mark.network
|
393 |
+
@pytest.mark.single_cpu
|
394 |
+
def test_bad_url_protocol(self, httpserver, flavor_read_html):
|
395 |
+
httpserver.serve_content("urlopen error unknown url type: git", code=404)
|
396 |
+
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
|
397 |
+
flavor_read_html("git://github.com", match=".*Water.*")
|
398 |
+
|
399 |
+
@pytest.mark.slow
|
400 |
+
@pytest.mark.network
|
401 |
+
@pytest.mark.single_cpu
|
402 |
+
def test_invalid_url(self, httpserver, flavor_read_html):
|
403 |
+
httpserver.serve_content("Name or service not known", code=404)
|
404 |
+
with pytest.raises((URLError, ValueError), match="HTTP Error 404: NOT FOUND"):
|
405 |
+
flavor_read_html(httpserver.url, match=".*Water.*")
|
406 |
+
|
407 |
+
@pytest.mark.slow
|
408 |
+
def test_file_url(self, banklist_data, flavor_read_html):
|
409 |
+
url = banklist_data
|
410 |
+
dfs = flavor_read_html(
|
411 |
+
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
|
412 |
+
)
|
413 |
+
assert isinstance(dfs, list)
|
414 |
+
for df in dfs:
|
415 |
+
assert isinstance(df, DataFrame)
|
416 |
+
|
417 |
+
@pytest.mark.slow
|
418 |
+
def test_invalid_table_attrs(self, banklist_data, flavor_read_html):
|
419 |
+
url = banklist_data
|
420 |
+
with pytest.raises(ValueError, match="No tables found"):
|
421 |
+
flavor_read_html(
|
422 |
+
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
|
423 |
+
)
|
424 |
+
|
425 |
+
@pytest.mark.slow
|
426 |
+
def test_multiindex_header(self, banklist_data, flavor_read_html):
|
427 |
+
df = flavor_read_html(
|
428 |
+
banklist_data, match="Metcalf", attrs={"id": "table"}, header=[0, 1]
|
429 |
+
)[0]
|
430 |
+
assert isinstance(df.columns, MultiIndex)
|
431 |
+
|
432 |
+
@pytest.mark.slow
|
433 |
+
def test_multiindex_index(self, banklist_data, flavor_read_html):
|
434 |
+
df = flavor_read_html(
|
435 |
+
banklist_data, match="Metcalf", attrs={"id": "table"}, index_col=[0, 1]
|
436 |
+
)[0]
|
437 |
+
assert isinstance(df.index, MultiIndex)
|
438 |
+
|
439 |
+
@pytest.mark.slow
|
440 |
+
def test_multiindex_header_index(self, banklist_data, flavor_read_html):
|
441 |
+
df = flavor_read_html(
|
442 |
+
banklist_data,
|
443 |
+
match="Metcalf",
|
444 |
+
attrs={"id": "table"},
|
445 |
+
header=[0, 1],
|
446 |
+
index_col=[0, 1],
|
447 |
+
)[0]
|
448 |
+
assert isinstance(df.columns, MultiIndex)
|
449 |
+
assert isinstance(df.index, MultiIndex)
|
450 |
+
|
451 |
+
@pytest.mark.slow
|
452 |
+
def test_multiindex_header_skiprows_tuples(self, banklist_data, flavor_read_html):
|
453 |
+
df = flavor_read_html(
|
454 |
+
banklist_data,
|
455 |
+
match="Metcalf",
|
456 |
+
attrs={"id": "table"},
|
457 |
+
header=[0, 1],
|
458 |
+
skiprows=1,
|
459 |
+
)[0]
|
460 |
+
assert isinstance(df.columns, MultiIndex)
|
461 |
+
|
462 |
+
@pytest.mark.slow
|
463 |
+
def test_multiindex_header_skiprows(self, banklist_data, flavor_read_html):
|
464 |
+
df = flavor_read_html(
|
465 |
+
banklist_data,
|
466 |
+
match="Metcalf",
|
467 |
+
attrs={"id": "table"},
|
468 |
+
header=[0, 1],
|
469 |
+
skiprows=1,
|
470 |
+
)[0]
|
471 |
+
assert isinstance(df.columns, MultiIndex)
|
472 |
+
|
473 |
+
@pytest.mark.slow
|
474 |
+
def test_multiindex_header_index_skiprows(self, banklist_data, flavor_read_html):
|
475 |
+
df = flavor_read_html(
|
476 |
+
banklist_data,
|
477 |
+
match="Metcalf",
|
478 |
+
attrs={"id": "table"},
|
479 |
+
header=[0, 1],
|
480 |
+
index_col=[0, 1],
|
481 |
+
skiprows=1,
|
482 |
+
)[0]
|
483 |
+
assert isinstance(df.index, MultiIndex)
|
484 |
+
assert isinstance(df.columns, MultiIndex)
|
485 |
+
|
486 |
+
@pytest.mark.slow
|
487 |
+
def test_regex_idempotency(self, banklist_data, flavor_read_html):
|
488 |
+
url = banklist_data
|
489 |
+
dfs = flavor_read_html(
|
490 |
+
file_path_to_url(os.path.abspath(url)),
|
491 |
+
match=re.compile(re.compile("Florida")),
|
492 |
+
attrs={"id": "table"},
|
493 |
+
)
|
494 |
+
assert isinstance(dfs, list)
|
495 |
+
for df in dfs:
|
496 |
+
assert isinstance(df, DataFrame)
|
497 |
+
|
498 |
+
def test_negative_skiprows(self, spam_data, flavor_read_html):
|
499 |
+
msg = r"\(you passed a negative value\)"
|
500 |
+
with pytest.raises(ValueError, match=msg):
|
501 |
+
flavor_read_html(spam_data, match="Water", skiprows=-1)
|
502 |
+
|
503 |
+
@pytest.fixture
|
504 |
+
def python_docs(self):
|
505 |
+
return """
|
506 |
+
<table class="contentstable" align="center"><tr>
|
507 |
+
<td width="50%">
|
508 |
+
<p class="biglink"><a class="biglink" href="whatsnew/2.7.html">What's new in Python 2.7?</a><br/>
|
509 |
+
<span class="linkdescr">or <a href="whatsnew/index.html">all "What's new" documents</a> since 2.0</span></p>
|
510 |
+
<p class="biglink"><a class="biglink" href="tutorial/index.html">Tutorial</a><br/>
|
511 |
+
<span class="linkdescr">start here</span></p>
|
512 |
+
<p class="biglink"><a class="biglink" href="library/index.html">Library Reference</a><br/>
|
513 |
+
<span class="linkdescr">keep this under your pillow</span></p>
|
514 |
+
<p class="biglink"><a class="biglink" href="reference/index.html">Language Reference</a><br/>
|
515 |
+
<span class="linkdescr">describes syntax and language elements</span></p>
|
516 |
+
<p class="biglink"><a class="biglink" href="using/index.html">Python Setup and Usage</a><br/>
|
517 |
+
<span class="linkdescr">how to use Python on different platforms</span></p>
|
518 |
+
<p class="biglink"><a class="biglink" href="howto/index.html">Python HOWTOs</a><br/>
|
519 |
+
<span class="linkdescr">in-depth documents on specific topics</span></p>
|
520 |
+
</td><td width="50%">
|
521 |
+
<p class="biglink"><a class="biglink" href="installing/index.html">Installing Python Modules</a><br/>
|
522 |
+
<span class="linkdescr">installing from the Python Package Index & other sources</span></p>
|
523 |
+
<p class="biglink"><a class="biglink" href="distributing/index.html">Distributing Python Modules</a><br/>
|
524 |
+
<span class="linkdescr">publishing modules for installation by others</span></p>
|
525 |
+
<p class="biglink"><a class="biglink" href="extending/index.html">Extending and Embedding</a><br/>
|
526 |
+
<span class="linkdescr">tutorial for C/C++ programmers</span></p>
|
527 |
+
<p class="biglink"><a class="biglink" href="c-api/index.html">Python/C API</a><br/>
|
528 |
+
<span class="linkdescr">reference for C/C++ programmers</span></p>
|
529 |
+
<p class="biglink"><a class="biglink" href="faq/index.html">FAQs</a><br/>
|
530 |
+
<span class="linkdescr">frequently asked questions (with answers!)</span></p>
|
531 |
+
</td></tr>
|
532 |
+
</table>
|
533 |
+
|
534 |
+
<p><strong>Indices and tables:</strong></p>
|
535 |
+
<table class="contentstable" align="center"><tr>
|
536 |
+
<td width="50%">
|
537 |
+
<p class="biglink"><a class="biglink" href="py-modindex.html">Python Global Module Index</a><br/>
|
538 |
+
<span class="linkdescr">quick access to all modules</span></p>
|
539 |
+
<p class="biglink"><a class="biglink" href="genindex.html">General Index</a><br/>
|
540 |
+
<span class="linkdescr">all functions, classes, terms</span></p>
|
541 |
+
<p class="biglink"><a class="biglink" href="glossary.html">Glossary</a><br/>
|
542 |
+
<span class="linkdescr">the most important terms explained</span></p>
|
543 |
+
</td><td width="50%">
|
544 |
+
<p class="biglink"><a class="biglink" href="search.html">Search page</a><br/>
|
545 |
+
<span class="linkdescr">search this documentation</span></p>
|
546 |
+
<p class="biglink"><a class="biglink" href="contents.html">Complete Table of Contents</a><br/>
|
547 |
+
<span class="linkdescr">lists all sections and subsections</span></p>
|
548 |
+
</td></tr>
|
549 |
+
</table>
|
550 |
+
""" # noqa: E501
|
551 |
+
|
552 |
+
@pytest.mark.network
|
553 |
+
@pytest.mark.single_cpu
|
554 |
+
def test_multiple_matches(self, python_docs, httpserver, flavor_read_html):
|
555 |
+
httpserver.serve_content(content=python_docs)
|
556 |
+
dfs = flavor_read_html(httpserver.url, match="Python")
|
557 |
+
assert len(dfs) > 1
|
558 |
+
|
559 |
+
@pytest.mark.network
|
560 |
+
@pytest.mark.single_cpu
|
561 |
+
def test_python_docs_table(self, python_docs, httpserver, flavor_read_html):
|
562 |
+
httpserver.serve_content(content=python_docs)
|
563 |
+
dfs = flavor_read_html(httpserver.url, match="Python")
|
564 |
+
zz = [df.iloc[0, 0][0:4] for df in dfs]
|
565 |
+
assert sorted(zz) == ["Pyth", "What"]
|
566 |
+
|
567 |
+
def test_empty_tables(self, flavor_read_html):
|
568 |
+
"""
|
569 |
+
Make sure that read_html ignores empty tables.
|
570 |
+
"""
|
571 |
+
html = """
|
572 |
+
<table>
|
573 |
+
<thead>
|
574 |
+
<tr>
|
575 |
+
<th>A</th>
|
576 |
+
<th>B</th>
|
577 |
+
</tr>
|
578 |
+
</thead>
|
579 |
+
<tbody>
|
580 |
+
<tr>
|
581 |
+
<td>1</td>
|
582 |
+
<td>2</td>
|
583 |
+
</tr>
|
584 |
+
</tbody>
|
585 |
+
</table>
|
586 |
+
<table>
|
587 |
+
<tbody>
|
588 |
+
</tbody>
|
589 |
+
</table>
|
590 |
+
"""
|
591 |
+
result = flavor_read_html(StringIO(html))
|
592 |
+
assert len(result) == 1
|
593 |
+
|
594 |
+
def test_multiple_tbody(self, flavor_read_html):
|
595 |
+
# GH-20690
|
596 |
+
# Read all tbody tags within a single table.
|
597 |
+
result = flavor_read_html(
|
598 |
+
StringIO(
|
599 |
+
"""<table>
|
600 |
+
<thead>
|
601 |
+
<tr>
|
602 |
+
<th>A</th>
|
603 |
+
<th>B</th>
|
604 |
+
</tr>
|
605 |
+
</thead>
|
606 |
+
<tbody>
|
607 |
+
<tr>
|
608 |
+
<td>1</td>
|
609 |
+
<td>2</td>
|
610 |
+
</tr>
|
611 |
+
</tbody>
|
612 |
+
<tbody>
|
613 |
+
<tr>
|
614 |
+
<td>3</td>
|
615 |
+
<td>4</td>
|
616 |
+
</tr>
|
617 |
+
</tbody>
|
618 |
+
</table>"""
|
619 |
+
)
|
620 |
+
)[0]
|
621 |
+
|
622 |
+
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
|
623 |
+
|
624 |
+
tm.assert_frame_equal(result, expected)
|
625 |
+
|
626 |
+
def test_header_and_one_column(self, flavor_read_html):
|
627 |
+
"""
|
628 |
+
Don't fail with bs4 when there is a header and only one column
|
629 |
+
as described in issue #9178
|
630 |
+
"""
|
631 |
+
result = flavor_read_html(
|
632 |
+
StringIO(
|
633 |
+
"""<table>
|
634 |
+
<thead>
|
635 |
+
<tr>
|
636 |
+
<th>Header</th>
|
637 |
+
</tr>
|
638 |
+
</thead>
|
639 |
+
<tbody>
|
640 |
+
<tr>
|
641 |
+
<td>first</td>
|
642 |
+
</tr>
|
643 |
+
</tbody>
|
644 |
+
</table>"""
|
645 |
+
)
|
646 |
+
)[0]
|
647 |
+
|
648 |
+
expected = DataFrame(data={"Header": "first"}, index=[0])
|
649 |
+
|
650 |
+
tm.assert_frame_equal(result, expected)
|
651 |
+
|
652 |
+
def test_thead_without_tr(self, flavor_read_html):
|
653 |
+
"""
|
654 |
+
Ensure parser adds <tr> within <thead> on malformed HTML.
|
655 |
+
"""
|
656 |
+
result = flavor_read_html(
|
657 |
+
StringIO(
|
658 |
+
"""<table>
|
659 |
+
<thead>
|
660 |
+
<tr>
|
661 |
+
<th>Country</th>
|
662 |
+
<th>Municipality</th>
|
663 |
+
<th>Year</th>
|
664 |
+
</tr>
|
665 |
+
</thead>
|
666 |
+
<tbody>
|
667 |
+
<tr>
|
668 |
+
<td>Ukraine</td>
|
669 |
+
<th>Odessa</th>
|
670 |
+
<td>1944</td>
|
671 |
+
</tr>
|
672 |
+
</tbody>
|
673 |
+
</table>"""
|
674 |
+
)
|
675 |
+
)[0]
|
676 |
+
|
677 |
+
expected = DataFrame(
|
678 |
+
data=[["Ukraine", "Odessa", 1944]],
|
679 |
+
columns=["Country", "Municipality", "Year"],
|
680 |
+
)
|
681 |
+
|
682 |
+
tm.assert_frame_equal(result, expected)
|
683 |
+
|
684 |
+
def test_tfoot_read(self, flavor_read_html):
|
685 |
+
"""
|
686 |
+
Make sure that read_html reads tfoot, containing td or th.
|
687 |
+
Ignores empty tfoot
|
688 |
+
"""
|
689 |
+
data_template = """<table>
|
690 |
+
<thead>
|
691 |
+
<tr>
|
692 |
+
<th>A</th>
|
693 |
+
<th>B</th>
|
694 |
+
</tr>
|
695 |
+
</thead>
|
696 |
+
<tbody>
|
697 |
+
<tr>
|
698 |
+
<td>bodyA</td>
|
699 |
+
<td>bodyB</td>
|
700 |
+
</tr>
|
701 |
+
</tbody>
|
702 |
+
<tfoot>
|
703 |
+
{footer}
|
704 |
+
</tfoot>
|
705 |
+
</table>"""
|
706 |
+
|
707 |
+
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
|
708 |
+
|
709 |
+
expected2 = DataFrame(
|
710 |
+
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
|
711 |
+
)
|
712 |
+
|
713 |
+
data1 = data_template.format(footer="")
|
714 |
+
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
|
715 |
+
|
716 |
+
result1 = flavor_read_html(StringIO(data1))[0]
|
717 |
+
result2 = flavor_read_html(StringIO(data2))[0]
|
718 |
+
|
719 |
+
tm.assert_frame_equal(result1, expected1)
|
720 |
+
tm.assert_frame_equal(result2, expected2)
|
721 |
+
|
722 |
+
def test_parse_header_of_non_string_column(self, flavor_read_html):
|
723 |
+
# GH5048: if header is specified explicitly, an int column should be
|
724 |
+
# parsed as int while its header is parsed as str
|
725 |
+
result = flavor_read_html(
|
726 |
+
StringIO(
|
727 |
+
"""
|
728 |
+
<table>
|
729 |
+
<tr>
|
730 |
+
<td>S</td>
|
731 |
+
<td>I</td>
|
732 |
+
</tr>
|
733 |
+
<tr>
|
734 |
+
<td>text</td>
|
735 |
+
<td>1944</td>
|
736 |
+
</tr>
|
737 |
+
</table>
|
738 |
+
"""
|
739 |
+
),
|
740 |
+
header=0,
|
741 |
+
)[0]
|
742 |
+
|
743 |
+
expected = DataFrame([["text", 1944]], columns=("S", "I"))
|
744 |
+
|
745 |
+
tm.assert_frame_equal(result, expected)
|
746 |
+
|
747 |
+
@pytest.mark.slow
|
748 |
+
def test_banklist_header(self, banklist_data, datapath, flavor_read_html):
|
749 |
+
from pandas.io.html import _remove_whitespace
|
750 |
+
|
751 |
+
def try_remove_ws(x):
|
752 |
+
try:
|
753 |
+
return _remove_whitespace(x)
|
754 |
+
except AttributeError:
|
755 |
+
return x
|
756 |
+
|
757 |
+
df = flavor_read_html(banklist_data, match="Metcalf", attrs={"id": "table"})[0]
|
758 |
+
ground_truth = read_csv(
|
759 |
+
datapath("io", "data", "csv", "banklist.csv"),
|
760 |
+
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
|
761 |
+
)
|
762 |
+
assert df.shape == ground_truth.shape
|
763 |
+
old = [
|
764 |
+
"First Vietnamese American Bank In Vietnamese",
|
765 |
+
"Westernbank Puerto Rico En Espanol",
|
766 |
+
"R-G Premier Bank of Puerto Rico En Espanol",
|
767 |
+
"Eurobank En Espanol",
|
768 |
+
"Sanderson State Bank En Espanol",
|
769 |
+
"Washington Mutual Bank (Including its subsidiary Washington "
|
770 |
+
"Mutual Bank FSB)",
|
771 |
+
"Silver State Bank En Espanol",
|
772 |
+
"AmTrade International Bank En Espanol",
|
773 |
+
"Hamilton Bank, NA En Espanol",
|
774 |
+
"The Citizens Savings Bank Pioneer Community Bank, Inc.",
|
775 |
+
]
|
776 |
+
new = [
|
777 |
+
"First Vietnamese American Bank",
|
778 |
+
"Westernbank Puerto Rico",
|
779 |
+
"R-G Premier Bank of Puerto Rico",
|
780 |
+
"Eurobank",
|
781 |
+
"Sanderson State Bank",
|
782 |
+
"Washington Mutual Bank",
|
783 |
+
"Silver State Bank",
|
784 |
+
"AmTrade International Bank",
|
785 |
+
"Hamilton Bank, NA",
|
786 |
+
"The Citizens Savings Bank",
|
787 |
+
]
|
788 |
+
dfnew = df.map(try_remove_ws).replace(old, new)
|
789 |
+
gtnew = ground_truth.map(try_remove_ws)
|
790 |
+
converted = dfnew
|
791 |
+
date_cols = ["Closing Date", "Updated Date"]
|
792 |
+
converted[date_cols] = converted[date_cols].apply(to_datetime)
|
793 |
+
tm.assert_frame_equal(converted, gtnew)
|
794 |
+
|
795 |
+
@pytest.mark.slow
|
796 |
+
def test_gold_canyon(self, banklist_data, flavor_read_html):
|
797 |
+
gc = "Gold Canyon"
|
798 |
+
with open(banklist_data, encoding="utf-8") as f:
|
799 |
+
raw_text = f.read()
|
800 |
+
|
801 |
+
assert gc in raw_text
|
802 |
+
df = flavor_read_html(
|
803 |
+
banklist_data, match="Gold Canyon", attrs={"id": "table"}
|
804 |
+
)[0]
|
805 |
+
assert gc in df.to_string()
|
806 |
+
|
807 |
+
def test_different_number_of_cols(self, flavor_read_html):
|
808 |
+
expected = flavor_read_html(
|
809 |
+
StringIO(
|
810 |
+
"""<table>
|
811 |
+
<thead>
|
812 |
+
<tr style="text-align: right;">
|
813 |
+
<th></th>
|
814 |
+
<th>C_l0_g0</th>
|
815 |
+
<th>C_l0_g1</th>
|
816 |
+
<th>C_l0_g2</th>
|
817 |
+
<th>C_l0_g3</th>
|
818 |
+
<th>C_l0_g4</th>
|
819 |
+
</tr>
|
820 |
+
</thead>
|
821 |
+
<tbody>
|
822 |
+
<tr>
|
823 |
+
<th>R_l0_g0</th>
|
824 |
+
<td> 0.763</td>
|
825 |
+
<td> 0.233</td>
|
826 |
+
<td> nan</td>
|
827 |
+
<td> nan</td>
|
828 |
+
<td> nan</td>
|
829 |
+
</tr>
|
830 |
+
<tr>
|
831 |
+
<th>R_l0_g1</th>
|
832 |
+
<td> 0.244</td>
|
833 |
+
<td> 0.285</td>
|
834 |
+
<td> 0.392</td>
|
835 |
+
<td> 0.137</td>
|
836 |
+
<td> 0.222</td>
|
837 |
+
</tr>
|
838 |
+
</tbody>
|
839 |
+
</table>"""
|
840 |
+
),
|
841 |
+
index_col=0,
|
842 |
+
)[0]
|
843 |
+
|
844 |
+
result = flavor_read_html(
|
845 |
+
StringIO(
|
846 |
+
"""<table>
|
847 |
+
<thead>
|
848 |
+
<tr style="text-align: right;">
|
849 |
+
<th></th>
|
850 |
+
<th>C_l0_g0</th>
|
851 |
+
<th>C_l0_g1</th>
|
852 |
+
<th>C_l0_g2</th>
|
853 |
+
<th>C_l0_g3</th>
|
854 |
+
<th>C_l0_g4</th>
|
855 |
+
</tr>
|
856 |
+
</thead>
|
857 |
+
<tbody>
|
858 |
+
<tr>
|
859 |
+
<th>R_l0_g0</th>
|
860 |
+
<td> 0.763</td>
|
861 |
+
<td> 0.233</td>
|
862 |
+
</tr>
|
863 |
+
<tr>
|
864 |
+
<th>R_l0_g1</th>
|
865 |
+
<td> 0.244</td>
|
866 |
+
<td> 0.285</td>
|
867 |
+
<td> 0.392</td>
|
868 |
+
<td> 0.137</td>
|
869 |
+
<td> 0.222</td>
|
870 |
+
</tr>
|
871 |
+
</tbody>
|
872 |
+
</table>"""
|
873 |
+
),
|
874 |
+
index_col=0,
|
875 |
+
)[0]
|
876 |
+
|
877 |
+
tm.assert_frame_equal(result, expected)
|
878 |
+
|
879 |
+
def test_colspan_rowspan_1(self, flavor_read_html):
|
880 |
+
# GH17054
|
881 |
+
result = flavor_read_html(
|
882 |
+
StringIO(
|
883 |
+
"""
|
884 |
+
<table>
|
885 |
+
<tr>
|
886 |
+
<th>A</th>
|
887 |
+
<th colspan="1">B</th>
|
888 |
+
<th rowspan="1">C</th>
|
889 |
+
</tr>
|
890 |
+
<tr>
|
891 |
+
<td>a</td>
|
892 |
+
<td>b</td>
|
893 |
+
<td>c</td>
|
894 |
+
</tr>
|
895 |
+
</table>
|
896 |
+
"""
|
897 |
+
)
|
898 |
+
)[0]
|
899 |
+
|
900 |
+
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
|
901 |
+
|
902 |
+
tm.assert_frame_equal(result, expected)
|
903 |
+
|
904 |
+
def test_colspan_rowspan_copy_values(self, flavor_read_html):
|
905 |
+
# GH17054
|
906 |
+
|
907 |
+
# In ASCII, with lowercase letters being copies:
|
908 |
+
#
|
909 |
+
# X x Y Z W
|
910 |
+
# A B b z C
|
911 |
+
|
912 |
+
result = flavor_read_html(
|
913 |
+
StringIO(
|
914 |
+
"""
|
915 |
+
<table>
|
916 |
+
<tr>
|
917 |
+
<td colspan="2">X</td>
|
918 |
+
<td>Y</td>
|
919 |
+
<td rowspan="2">Z</td>
|
920 |
+
<td>W</td>
|
921 |
+
</tr>
|
922 |
+
<tr>
|
923 |
+
<td>A</td>
|
924 |
+
<td colspan="2">B</td>
|
925 |
+
<td>C</td>
|
926 |
+
</tr>
|
927 |
+
</table>
|
928 |
+
"""
|
929 |
+
),
|
930 |
+
header=0,
|
931 |
+
)[0]
|
932 |
+
|
933 |
+
expected = DataFrame(
|
934 |
+
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
|
935 |
+
)
|
936 |
+
|
937 |
+
tm.assert_frame_equal(result, expected)
|
938 |
+
|
939 |
+
def test_colspan_rowspan_both_not_1(self, flavor_read_html):
|
940 |
+
# GH17054
|
941 |
+
|
942 |
+
# In ASCII, with lowercase letters being copies:
|
943 |
+
#
|
944 |
+
# A B b b C
|
945 |
+
# a b b b D
|
946 |
+
|
947 |
+
result = flavor_read_html(
|
948 |
+
StringIO(
|
949 |
+
"""
|
950 |
+
<table>
|
951 |
+
<tr>
|
952 |
+
<td rowspan="2">A</td>
|
953 |
+
<td rowspan="2" colspan="3">B</td>
|
954 |
+
<td>C</td>
|
955 |
+
</tr>
|
956 |
+
<tr>
|
957 |
+
<td>D</td>
|
958 |
+
</tr>
|
959 |
+
</table>
|
960 |
+
"""
|
961 |
+
),
|
962 |
+
header=0,
|
963 |
+
)[0]
|
964 |
+
|
965 |
+
expected = DataFrame(
|
966 |
+
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
|
967 |
+
)
|
968 |
+
|
969 |
+
tm.assert_frame_equal(result, expected)
|
970 |
+
|
971 |
+
def test_rowspan_at_end_of_row(self, flavor_read_html):
|
972 |
+
# GH17054
|
973 |
+
|
974 |
+
# In ASCII, with lowercase letters being copies:
|
975 |
+
#
|
976 |
+
# A B
|
977 |
+
# C b
|
978 |
+
|
979 |
+
result = flavor_read_html(
|
980 |
+
StringIO(
|
981 |
+
"""
|
982 |
+
<table>
|
983 |
+
<tr>
|
984 |
+
<td>A</td>
|
985 |
+
<td rowspan="2">B</td>
|
986 |
+
</tr>
|
987 |
+
<tr>
|
988 |
+
<td>C</td>
|
989 |
+
</tr>
|
990 |
+
</table>
|
991 |
+
"""
|
992 |
+
),
|
993 |
+
header=0,
|
994 |
+
)[0]
|
995 |
+
|
996 |
+
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
|
997 |
+
|
998 |
+
tm.assert_frame_equal(result, expected)
|
999 |
+
|
1000 |
+
def test_rowspan_only_rows(self, flavor_read_html):
|
1001 |
+
# GH17054
|
1002 |
+
|
1003 |
+
result = flavor_read_html(
|
1004 |
+
StringIO(
|
1005 |
+
"""
|
1006 |
+
<table>
|
1007 |
+
<tr>
|
1008 |
+
<td rowspan="3">A</td>
|
1009 |
+
<td rowspan="3">B</td>
|
1010 |
+
</tr>
|
1011 |
+
</table>
|
1012 |
+
"""
|
1013 |
+
),
|
1014 |
+
header=0,
|
1015 |
+
)[0]
|
1016 |
+
|
1017 |
+
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
|
1018 |
+
|
1019 |
+
tm.assert_frame_equal(result, expected)
|
1020 |
+
|
    def test_header_inferred_from_rows_with_only_th(self, flavor_read_html):
        # GH17054
        result = flavor_read_html(
            StringIO(
                """
                <table>
                    <tr>
                        <th>A</th>
                        <th>B</th>
                    </tr>
                    <tr>
                        <th>a</th>
                        <th>b</th>
                    </tr>
                    <tr>
                        <td>1</td>
                        <td>2</td>
                    </tr>
                </table>
                """
            )
        )[0]

        columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
        expected = DataFrame(data=[[1, 2]], columns=columns)

        tm.assert_frame_equal(result, expected)

    def test_parse_dates_list(self, flavor_read_html):
        df = DataFrame({"date": date_range("1/1/2001", periods=10)})
        expected = df.to_html()
        res = flavor_read_html(StringIO(expected), parse_dates=[1], index_col=0)
        tm.assert_frame_equal(df, res[0])
        res = flavor_read_html(StringIO(expected), parse_dates=["date"], index_col=0)
        tm.assert_frame_equal(df, res[0])

    def test_parse_dates_combine(self, flavor_read_html):
        raw_dates = Series(date_range("1/1/2001", periods=10))
        df = DataFrame(
            {
                "date": raw_dates.map(lambda x: str(x.date())),
                "time": raw_dates.map(lambda x: str(x.time())),
            }
        )
        res = flavor_read_html(
            StringIO(df.to_html()), parse_dates={"datetime": [1, 2]}, index_col=1
        )
        newdf = DataFrame({"datetime": raw_dates})
        tm.assert_frame_equal(newdf, res[0])

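    # Illustrative sketch, not part of the diff above: passing a dict to
    # parse_dates concatenates the named columns before datetime parsing,
    # the same mechanism read_csv uses.
    #
    #   pd.read_html(StringIO(html), parse_dates={"datetime": [1, 2]},
    #                index_col=1)
    #   # columns 1 ("date") and 2 ("time") are combined and parsed into a
    #   # single "datetime" column, as the test above verifies.
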
    def test_wikipedia_states_table(self, datapath, flavor_read_html):
        data = datapath("io", "data", "html", "wikipedia_states.html")
        assert os.path.isfile(data), f"{repr(data)} is not a file"
        assert os.path.getsize(data), f"{repr(data)} is an empty file"
        result = flavor_read_html(data, match="Arizona", header=1)[0]
        assert result.shape == (60, 12)
        assert "Unnamed" in result.columns[-1]
        assert result["sq mi"].dtype == np.dtype("float64")
        assert np.allclose(result.loc[0, "sq mi"], 665384.04)

    def test_wikipedia_states_multiindex(self, datapath, flavor_read_html):
        data = datapath("io", "data", "html", "wikipedia_states.html")
        result = flavor_read_html(data, match="Arizona", index_col=0)[0]
        assert result.shape == (60, 11)
        assert "Unnamed" in result.columns[-1][1]
        assert result.columns.nlevels == 2
        assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)

    def test_parser_error_on_empty_header_row(self, flavor_read_html):
        result = flavor_read_html(
            StringIO(
                """
                <table>
                    <thead>
                        <tr><th></th><th></tr>
                        <tr><th>A</th><th>B</th></tr>
                    </thead>
                    <tbody>
                        <tr><td>a</td><td>b</td></tr>
                    </tbody>
                </table>
                """
            ),
            header=[0, 1],
        )
        expected = DataFrame(
            [["a", "b"]],
            columns=MultiIndex.from_tuples(
                [("Unnamed: 0_level_0", "A"), ("Unnamed: 1_level_0", "B")]
            ),
        )
        tm.assert_frame_equal(result[0], expected)

    def test_decimal_rows(self, flavor_read_html):
        # GH 12907
        result = flavor_read_html(
            StringIO(
                """<html>
            <body>
                <table>
                    <thead>
                        <tr>
                            <th>Header</th>
                        </tr>
                    </thead>
                    <tbody>
                        <tr>
                            <td>1100#101</td>
                        </tr>
                    </tbody>
                </table>
            </body>
        </html>"""
            ),
            decimal="#",
        )[0]

        expected = DataFrame(data={"Header": 1100.101}, index=[0])

        assert result["Header"].dtype == np.dtype("float64")
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("arg", [True, False])
    def test_bool_header_arg(self, spam_data, arg, flavor_read_html):
        # GH 6114
        msg = re.escape(
            "Passing a bool to header is invalid. Use header=None for no header or "
            "header=int or list-like of ints to specify the row(s) making up the "
            "column names"
        )
        with pytest.raises(TypeError, match=msg):
            flavor_read_html(spam_data, header=arg)

    def test_converters(self, flavor_read_html):
        # GH 13461
        result = flavor_read_html(
            StringIO(
                """<table>
                <thead>
                    <tr>
                        <th>a</th>
                    </tr>
                </thead>
                <tbody>
                    <tr>
                        <td> 0.763</td>
                    </tr>
                    <tr>
                        <td> 0.244</td>
                    </tr>
                </tbody>
            </table>"""
            ),
            converters={"a": str},
        )[0]

        expected = DataFrame({"a": ["0.763", "0.244"]})

        tm.assert_frame_equal(result, expected)

    def test_na_values(self, flavor_read_html):
        # GH 13461
        result = flavor_read_html(
            StringIO(
                """<table>
                <thead>
                    <tr>
                        <th>a</th>
                    </tr>
                </thead>
                <tbody>
                    <tr>
                        <td> 0.763</td>
                    </tr>
                    <tr>
                        <td> 0.244</td>
                    </tr>
                </tbody>
            </table>"""
            ),
            na_values=[0.244],
        )[0]

        expected = DataFrame({"a": [0.763, np.nan]})

        tm.assert_frame_equal(result, expected)

    def test_keep_default_na(self, flavor_read_html):
        html_data = """<table>
                <thead>
                    <tr>
                        <th>a</th>
                    </tr>
                </thead>
                <tbody>
                    <tr>
                        <td> N/A</td>
                    </tr>
                    <tr>
                        <td> NA</td>
                    </tr>
                </tbody>
            </table>"""

        expected_df = DataFrame({"a": ["N/A", "NA"]})
        html_df = flavor_read_html(StringIO(html_data), keep_default_na=False)[0]
        tm.assert_frame_equal(expected_df, html_df)

        expected_df = DataFrame({"a": [np.nan, np.nan]})
        html_df = flavor_read_html(StringIO(html_data), keep_default_na=True)[0]
        tm.assert_frame_equal(expected_df, html_df)

    def test_preserve_empty_rows(self, flavor_read_html):
        result = flavor_read_html(
            StringIO(
                """
                <table>
                    <tr>
                        <th>A</th>
                        <th>B</th>
                    </tr>
                    <tr>
                        <td>a</td>
                        <td>b</td>
                    </tr>
                    <tr>
                        <td></td>
                        <td></td>
                    </tr>
                </table>
                """
            )
        )[0]

        expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])

        tm.assert_frame_equal(result, expected)

    def test_ignore_empty_rows_when_inferring_header(self, flavor_read_html):
        result = flavor_read_html(
            StringIO(
                """
                <table>
                    <thead>
                        <tr><th></th><th></tr>
                        <tr><th>A</th><th>B</th></tr>
                        <tr><th>a</th><th>b</th></tr>
                    </thead>
                    <tbody>
                        <tr><td>1</td><td>2</td></tr>
                    </tbody>
                </table>
                """
            )
        )[0]

        columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
        expected = DataFrame(data=[[1, 2]], columns=columns)

        tm.assert_frame_equal(result, expected)

    def test_multiple_header_rows(self, flavor_read_html):
        # Issue #13434
        expected_df = DataFrame(
            data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
        )
        expected_df.columns = [
            ["Unnamed: 0_level_0", "Age", "Party"],
            ["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
        ]
        html = expected_df.to_html(index=False)
        html_df = flavor_read_html(StringIO(html))[0]
        tm.assert_frame_equal(expected_df, html_df)

    def test_works_on_valid_markup(self, datapath, flavor_read_html):
        filename = datapath("io", "data", "html", "valid_markup.html")
        dfs = flavor_read_html(filename, index_col=0)
        assert isinstance(dfs, list)
        assert isinstance(dfs[0], DataFrame)

    @pytest.mark.slow
    def test_fallback_success(self, datapath, flavor_read_html):
        banklist_data = datapath("io", "data", "html", "banklist.html")

        flavor_read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])

    def test_to_html_timestamp(self):
        rng = date_range("2000-01-01", periods=10)
        df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)), index=rng)

        result = df.to_html()
        assert "2000-01-01" in result

    def test_to_html_borderless(self):
        df = DataFrame([{"A": 1, "B": 2}])
        out_border_default = df.to_html()
        out_border_true = df.to_html(border=True)
        out_border_explicit_default = df.to_html(border=1)
        out_border_nondefault = df.to_html(border=2)
        out_border_zero = df.to_html(border=0)

        out_border_false = df.to_html(border=False)

        assert ' border="1"' in out_border_default
        assert out_border_true == out_border_default
        assert out_border_default == out_border_explicit_default
        assert out_border_default != out_border_nondefault
        assert ' border="2"' in out_border_nondefault
        assert ' border="0"' not in out_border_zero
        assert " border" not in out_border_false
        assert out_border_zero == out_border_false

    @pytest.mark.parametrize(
        "displayed_only,exp0,exp1",
        [
            (True, DataFrame(["foo"]), None),
            (False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
        ],
    )
    def test_displayed_only(self, displayed_only, exp0, exp1, flavor_read_html):
        # GH 20027
        data = """<html>
          <body>
            <table>
              <tr>
                <td>
                  foo
                  <span style="display:none;text-align:center">bar</span>
                  <span style="display:none">baz</span>
                  <span style="display: none">qux</span>
                </td>
              </tr>
            </table>
            <table style="display: none">
              <tr>
                <td>foo</td>
              </tr>
            </table>
          </body>
        </html>"""

        dfs = flavor_read_html(StringIO(data), displayed_only=displayed_only)
        tm.assert_frame_equal(dfs[0], exp0)

        if exp1 is not None:
            tm.assert_frame_equal(dfs[1], exp1)
        else:
            assert len(dfs) == 1  # Should not parse hidden table

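    # Illustrative sketch, not part of the diff above: displayed_only=True
    # (the default) makes read_html drop any element whose inline style sets
    # display:none before parsing, which is what the test above exercises.
    #
    #   pd.read_html(StringIO(data), displayed_only=False)
    #   # parses hidden tables and hidden <span> text as well.
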
    @pytest.mark.parametrize("displayed_only", [True, False])
    def test_displayed_only_with_many_elements(self, displayed_only, flavor_read_html):
        html_table = """
        <table>
            <tr>
                <th>A</th>
                <th>B</th>
            </tr>
            <tr>
                <td>1</td>
                <td>2</td>
            </tr>
            <tr>
                <td><span style="display:none"></span>4</td>
                <td>5</td>
            </tr>
        </table>
        """
        result = flavor_read_html(StringIO(html_table), displayed_only=displayed_only)[
            0
        ]
        expected = DataFrame({"A": [1, 4], "B": [2, 5]})
        tm.assert_frame_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:You provided Unicode markup but also provided a value for "
        "from_encoding.*:UserWarning"
    )
    def test_encode(self, html_encoding_file, flavor_read_html):
        base_path = os.path.basename(html_encoding_file)
        root = os.path.splitext(base_path)[0]
        _, encoding = root.split("_")

        try:
            with open(html_encoding_file, "rb") as fobj:
                from_string = flavor_read_html(
                    fobj.read(), encoding=encoding, index_col=0
                ).pop()

            with open(html_encoding_file, "rb") as fobj:
                from_file_like = flavor_read_html(
                    BytesIO(fobj.read()), encoding=encoding, index_col=0
                ).pop()

            from_filename = flavor_read_html(
                html_encoding_file, encoding=encoding, index_col=0
            ).pop()
            tm.assert_frame_equal(from_string, from_file_like)
            tm.assert_frame_equal(from_string, from_filename)
        except Exception:
            # seems utf-16/32 fail on windows
            if is_platform_windows():
                if "16" in encoding or "32" in encoding:
                    pytest.skip()
            raise

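    # Illustrative sketch, not part of the diff above: when one parser flavor
    # fails, read_html retries with the next, so a file-like input must be
    # rewindable. A seekable object is rewound with seek(0) between attempts;
    # a non-seekable one raises, which the two tests below pin down.
    #
    #   buf = StringIO("<table><tr><td>spam<foobr />eggs</td></tr></table>")
    #   pd.read_html(buf)  # works: buf is rewound before the fallback parse
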
    def test_parse_failure_unseekable(self, flavor_read_html):
        # Issue #17975

        if flavor_read_html.keywords.get("flavor") == "lxml":
            pytest.skip("Not applicable for lxml")

        class UnseekableStringIO(StringIO):
            def seekable(self):
                return False

        bad = UnseekableStringIO(
            """
            <table><tr><td>spam<foobr />eggs</td></tr></table>"""
        )

        assert flavor_read_html(bad)

        with pytest.raises(ValueError, match="passed a non-rewindable file object"):
            flavor_read_html(bad)

    def test_parse_failure_rewinds(self, flavor_read_html):
        # Issue #17975

        class MockFile:
            def __init__(self, data) -> None:
                self.data = data
                self.at_end = False

            def read(self, size=None):
                data = "" if self.at_end else self.data
                self.at_end = True
                return data

            def seek(self, offset):
                self.at_end = False

            def seekable(self):
                return True

            # GH 49036 pylint checks for presence of __next__ for iterators
            def __next__(self):
                ...

            def __iter__(self) -> Iterator:
                # `is_file_like` depends on the presence of
                # the __iter__ attribute.
                return self

        good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
        bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")

        assert flavor_read_html(good)
        assert flavor_read_html(bad)

    @pytest.mark.slow
    @pytest.mark.single_cpu
    def test_importcheck_thread_safety(self, datapath, flavor_read_html):
        # see gh-16928

        class ErrorThread(threading.Thread):
            def run(self):
                try:
                    super().run()
                except Exception as err:
                    self.err = err
                else:
                    self.err = None

        filename = datapath("io", "data", "html", "valid_markup.html")
        helper_thread1 = ErrorThread(target=flavor_read_html, args=(filename,))
        helper_thread2 = ErrorThread(target=flavor_read_html, args=(filename,))

        helper_thread1.start()
        helper_thread2.start()

        while helper_thread1.is_alive() or helper_thread2.is_alive():
            pass
        assert None is helper_thread1.err is helper_thread2.err

    def test_parse_path_object(self, datapath, flavor_read_html):
        # GH 37705
        file_path_string = datapath("io", "data", "html", "spam.html")
        file_path = Path(file_path_string)
        df1 = flavor_read_html(file_path_string)[0]
        df2 = flavor_read_html(file_path)[0]
        tm.assert_frame_equal(df1, df2)

    def test_parse_br_as_space(self, flavor_read_html):
        # GH 29528: pd.read_html() convert <br> to space
        result = flavor_read_html(
            StringIO(
                """
                <table>
                    <tr>
                        <th>A</th>
                    </tr>
                    <tr>
                        <td>word1<br>word2</td>
                    </tr>
                </table>
                """
            )
        )[0]

        expected = DataFrame(data=[["word1 word2"]], columns=["A"])

        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("arg", ["all", "body", "header", "footer"])
    def test_extract_links(self, arg, flavor_read_html):
        gh_13141_data = """
          <table>
            <tr>
              <th>HTTP</th>
              <th>FTP</th>
              <th><a href="https://en.wiktionary.org/wiki/linkless">Linkless</a></th>
            </tr>
            <tr>
              <td><a href="https://en.wikipedia.org/">Wikipedia</a></td>
              <td>SURROUNDING <a href="ftp://ftp.us.debian.org/">Debian</a> TEXT</td>
              <td>Linkless</td>
            </tr>
            <tfoot>
              <tr>
                <td><a href="https://en.wikipedia.org/wiki/Page_footer">Footer</a></td>
                <td>
                  Multiple <a href="1">links:</a> <a href="2">Only first captured.</a>
                </td>
              </tr>
            </tfoot>
          </table>
          """

        gh_13141_expected = {
            "head_ignore": ["HTTP", "FTP", "Linkless"],
            "head_extract": [
                ("HTTP", None),
                ("FTP", None),
                ("Linkless", "https://en.wiktionary.org/wiki/linkless"),
            ],
            "body_ignore": ["Wikipedia", "SURROUNDING Debian TEXT", "Linkless"],
            "body_extract": [
                ("Wikipedia", "https://en.wikipedia.org/"),
                ("SURROUNDING Debian TEXT", "ftp://ftp.us.debian.org/"),
                ("Linkless", None),
            ],
            "footer_ignore": [
                "Footer",
                "Multiple links: Only first captured.",
                None,
            ],
            "footer_extract": [
                ("Footer", "https://en.wikipedia.org/wiki/Page_footer"),
                ("Multiple links: Only first captured.", "1"),
                None,
            ],
        }

        data_exp = gh_13141_expected["body_ignore"]
        foot_exp = gh_13141_expected["footer_ignore"]
        head_exp = gh_13141_expected["head_ignore"]
        if arg == "all":
            data_exp = gh_13141_expected["body_extract"]
            foot_exp = gh_13141_expected["footer_extract"]
            head_exp = gh_13141_expected["head_extract"]
        elif arg == "body":
            data_exp = gh_13141_expected["body_extract"]
        elif arg == "footer":
            foot_exp = gh_13141_expected["footer_extract"]
        elif arg == "header":
            head_exp = gh_13141_expected["head_extract"]

        result = flavor_read_html(StringIO(gh_13141_data), extract_links=arg)[0]
        expected = DataFrame([data_exp, foot_exp], columns=head_exp)
        expected = expected.fillna(np.nan)
        tm.assert_frame_equal(result, expected)

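    # Illustrative sketch, not part of the diff above: with extract_links
    # set, each affected cell becomes a (text, href) tuple, with None when a
    # cell has no <a> tag; only the first link per cell is captured.
    #
    #   pd.read_html(StringIO(html), extract_links="body")[0]
    #   # body cells -> ("Wikipedia", "https://en.wikipedia.org/"), etc.
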
    def test_extract_links_bad(self, spam_data):
        msg = (
            "`extract_links` must be one of "
            '{None, "header", "footer", "body", "all"}, got "incorrect"'
        )
        with pytest.raises(ValueError, match=msg):
            read_html(spam_data, extract_links="incorrect")

    def test_extract_links_all_no_header(self, flavor_read_html):
        # GH 48316
        data = """
        <table>
          <tr>
            <td>
              <a href='https://google.com'>Google.com</a>
            </td>
          </tr>
        </table>
        """
        result = flavor_read_html(StringIO(data), extract_links="all")[0]
        expected = DataFrame([[("Google.com", "https://google.com")]])
        tm.assert_frame_equal(result, expected)

    def test_invalid_dtype_backend(self):
        msg = (
            "dtype_backend numpy is invalid, only 'numpy_nullable' and "
            "'pyarrow' are allowed."
        )
        with pytest.raises(ValueError, match=msg):
            read_html("test", dtype_backend="numpy")

    def test_style_tag(self, flavor_read_html):
        # GH 48316
        data = """
        <table>
            <tr>
                <th>
                    <style>.style</style>
                    A
                </th>
                <th>B</th>
            </tr>
            <tr>
                <td>A1</td>
                <td>B1</td>
            </tr>
            <tr>
                <td>A2</td>
                <td>B2</td>
            </tr>
        </table>
        """
        result = flavor_read_html(StringIO(data))[0]
        expected = DataFrame(data=[["A1", "B1"], ["A2", "B2"]], columns=["A", "B"])
        tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_http_headers.py
ADDED
@@ -0,0 +1,172 @@
"""
Tests for the pandas custom headers in http(s) requests
"""
from functools import partial
import gzip
from io import BytesIO

import pytest

import pandas.util._test_decorators as td

import pandas as pd
import pandas._testing as tm

pytestmark = [
    pytest.mark.single_cpu,
    pytest.mark.network,
    pytest.mark.filterwarnings(
        "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
    ),
]


def gzip_bytes(response_bytes):
    with BytesIO() as bio:
        with gzip.GzipFile(fileobj=bio, mode="w") as zipper:
            zipper.write(response_bytes)
        return bio.getvalue()


def csv_responder(df):
    return df.to_csv(index=False).encode("utf-8")


def gz_csv_responder(df):
    return gzip_bytes(csv_responder(df))


def json_responder(df):
    return df.to_json().encode("utf-8")


def gz_json_responder(df):
    return gzip_bytes(json_responder(df))


def html_responder(df):
    return df.to_html(index=False).encode("utf-8")


def parquetpyarrow_reponder(df):
    return df.to_parquet(index=False, engine="pyarrow")


def parquetfastparquet_responder(df):
    # the fastparquet engine doesn't like to write to a buffer
    # it can do it via the open_with function being set appropriately
    # however it automatically calls the close method and wipes the buffer
    # so just overwrite that attribute on this instance to not do that

    # protected by an importorskip in the respective test
    import fsspec

    df.to_parquet(
        "memory://fastparquet_user_agent.parquet",
        index=False,
        engine="fastparquet",
        compression=None,
    )
    with fsspec.open("memory://fastparquet_user_agent.parquet", "rb") as f:
        return f.read()


def pickle_respnder(df):
    with BytesIO() as bio:
        df.to_pickle(bio)
        return bio.getvalue()


def stata_responder(df):
    with BytesIO() as bio:
        df.to_stata(bio, write_index=False)
        return bio.getvalue()

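# Illustrative sketch, not part of the diff above: for plain http(s) URLs,
# storage_options entries are sent verbatim as extra request headers by the
# readers exercised below ("https://example.com/..." is a placeholder).
#
#   pd.read_csv(
#       "https://example.com/data.csv",
#       storage_options={"User-Agent": "my-app/1.0", "Auth": "token"},
#   )
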
@pytest.mark.parametrize(
    "responder, read_method",
    [
        (csv_responder, pd.read_csv),
        (json_responder, pd.read_json),
        (
            html_responder,
            lambda *args, **kwargs: pd.read_html(*args, **kwargs)[0],
        ),
        pytest.param(
            parquetpyarrow_reponder,
            partial(pd.read_parquet, engine="pyarrow"),
            marks=td.skip_if_no("pyarrow"),
        ),
        pytest.param(
            parquetfastparquet_responder,
            partial(pd.read_parquet, engine="fastparquet"),
            # TODO(ArrayManager) fastparquet
            marks=[
                td.skip_if_no("fastparquet"),
                td.skip_if_no("fsspec"),
                td.skip_array_manager_not_yet_implemented,
            ],
        ),
        (pickle_respnder, pd.read_pickle),
        (stata_responder, pd.read_stata),
        (gz_csv_responder, pd.read_csv),
        (gz_json_responder, pd.read_json),
    ],
)
@pytest.mark.parametrize(
    "storage_options",
    [
        None,
        {"User-Agent": "foo"},
        {"User-Agent": "foo", "Auth": "bar"},
    ],
)
def test_request_headers(responder, read_method, httpserver, storage_options):
    expected = pd.DataFrame({"a": ["b"]})
    default_headers = ["Accept-Encoding", "Host", "Connection", "User-Agent"]
    if "gz" in responder.__name__:
        extra = {"Content-Encoding": "gzip"}
        if storage_options is None:
            storage_options = extra
        else:
            storage_options |= extra
    else:
        extra = None
    expected_headers = set(default_headers).union(
        storage_options.keys() if storage_options else []
    )
    httpserver.serve_content(content=responder(expected), headers=extra)
    result = read_method(httpserver.url, storage_options=storage_options)
    tm.assert_frame_equal(result, expected)

    request_headers = dict(httpserver.requests[0].headers)
    for header in expected_headers:
        exp = request_headers.pop(header)
        if storage_options and header in storage_options:
            assert exp == storage_options[header]
    # No extra headers added
    assert not request_headers


@pytest.mark.parametrize(
    "engine",
    [
        "pyarrow",
        "fastparquet",
    ],
)
def test_to_parquet_to_disk_with_storage_options(engine):
    headers = {
        "User-Agent": "custom",
        "Auth": "other_custom",
    }

    pytest.importorskip(engine)

    true_df = pd.DataFrame({"column_name": ["column_value"]})
    msg = (
        "storage_options passed with file object or non-fsspec file path|"
        "storage_options passed with buffer, or non-supported URL"
    )
    with pytest.raises(ValueError, match=msg):
        true_df.to_parquet("/tmp/junk.parquet", storage_options=headers, engine=engine)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_orc.py
ADDED
@@ -0,0 +1,436 @@
""" test orc compat """
import datetime
from decimal import Decimal
from io import BytesIO
import os
import pathlib

import numpy as np
import pytest

import pandas as pd
from pandas import read_orc
import pandas._testing as tm
from pandas.core.arrays import StringArray

pytest.importorskip("pyarrow.orc")

import pyarrow as pa

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)


@pytest.fixture
def dirpath(datapath):
    return datapath("io", "data", "orc")


@pytest.fixture(
    params=[
        np.array([1, 20], dtype="uint64"),
        pd.Series(["a", "b", "a"], dtype="category"),
        [pd.Interval(left=0, right=2), pd.Interval(left=0, right=5)],
        [pd.Period("2022-01-03", freq="D"), pd.Period("2022-01-04", freq="D")],
    ]
)
def orc_writer_dtypes_not_supported(request):
    # Examples of dataframes with dtypes for which conversion to ORC
    # hasn't been implemented yet, that is, Category, unsigned integers,
    # interval, period and sparse.
    return pd.DataFrame({"unimpl": request.param})


def test_orc_reader_empty(dirpath):
    columns = [
        "boolean1",
        "byte1",
        "short1",
        "int1",
        "long1",
        "float1",
        "double1",
        "bytes1",
        "string1",
    ]
    dtypes = [
        "bool",
        "int8",
        "int16",
        "int32",
        "int64",
        "float32",
        "float64",
        "object",
        "object",
    ]
    expected = pd.DataFrame(index=pd.RangeIndex(0))
    for colname, dtype in zip(columns, dtypes):
        expected[colname] = pd.Series(dtype=dtype)

    inputfile = os.path.join(dirpath, "TestOrcFile.emptyFile.orc")
    got = read_orc(inputfile, columns=columns)

    tm.assert_equal(expected, got)


def test_orc_reader_basic(dirpath):
    data = {
        "boolean1": np.array([False, True], dtype="bool"),
        "byte1": np.array([1, 100], dtype="int8"),
        "short1": np.array([1024, 2048], dtype="int16"),
        "int1": np.array([65536, 65536], dtype="int32"),
        "long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"),
        "float1": np.array([1.0, 2.0], dtype="float32"),
        "double1": np.array([-15.0, -5.0], dtype="float64"),
        "bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"),
        "string1": np.array(["hi", "bye"], dtype="object"),
    }
    expected = pd.DataFrame.from_dict(data)

    inputfile = os.path.join(dirpath, "TestOrcFile.test1.orc")
    got = read_orc(inputfile, columns=data.keys())

    tm.assert_equal(expected, got)


def test_orc_reader_decimal(dirpath):
    # Only testing the first 10 rows of data
    data = {
        "_col0": np.array(
            [
                Decimal("-1000.50000"),
                Decimal("-999.60000"),
                Decimal("-998.70000"),
                Decimal("-997.80000"),
                Decimal("-996.90000"),
                Decimal("-995.10000"),
                Decimal("-994.11000"),
                Decimal("-993.12000"),
                Decimal("-992.13000"),
                Decimal("-991.14000"),
            ],
            dtype="object",
        )
    }
    expected = pd.DataFrame.from_dict(data)

    inputfile = os.path.join(dirpath, "TestOrcFile.decimal.orc")
    got = read_orc(inputfile).iloc[:10]

    tm.assert_equal(expected, got)


def test_orc_reader_date_low(dirpath):
    data = {
        "time": np.array(
            [
                "1900-05-05 12:34:56.100000",
                "1900-05-05 12:34:56.100100",
                "1900-05-05 12:34:56.100200",
                "1900-05-05 12:34:56.100300",
                "1900-05-05 12:34:56.100400",
                "1900-05-05 12:34:56.100500",
                "1900-05-05 12:34:56.100600",
                "1900-05-05 12:34:56.100700",
                "1900-05-05 12:34:56.100800",
                "1900-05-05 12:34:56.100900",
            ],
            dtype="datetime64[ns]",
        ),
        "date": np.array(
            [
                datetime.date(1900, 12, 25),
                datetime.date(1900, 12, 25),
                datetime.date(1900, 12, 25),
                datetime.date(1900, 12, 25),
                datetime.date(1900, 12, 25),
                datetime.date(1900, 12, 25),
                datetime.date(1900, 12, 25),
                datetime.date(1900, 12, 25),
                datetime.date(1900, 12, 25),
                datetime.date(1900, 12, 25),
            ],
            dtype="object",
        ),
    }
    expected = pd.DataFrame.from_dict(data)

    inputfile = os.path.join(dirpath, "TestOrcFile.testDate1900.orc")
    got = read_orc(inputfile).iloc[:10]

    tm.assert_equal(expected, got)


def test_orc_reader_date_high(dirpath):
    data = {
        "time": np.array(
            [
                "2038-05-05 12:34:56.100000",
                "2038-05-05 12:34:56.100100",
                "2038-05-05 12:34:56.100200",
                "2038-05-05 12:34:56.100300",
                "2038-05-05 12:34:56.100400",
                "2038-05-05 12:34:56.100500",
                "2038-05-05 12:34:56.100600",
                "2038-05-05 12:34:56.100700",
                "2038-05-05 12:34:56.100800",
                "2038-05-05 12:34:56.100900",
            ],
            dtype="datetime64[ns]",
        ),
        "date": np.array(
            [
                datetime.date(2038, 12, 25),
                datetime.date(2038, 12, 25),
                datetime.date(2038, 12, 25),
                datetime.date(2038, 12, 25),
                datetime.date(2038, 12, 25),
                datetime.date(2038, 12, 25),
                datetime.date(2038, 12, 25),
                datetime.date(2038, 12, 25),
                datetime.date(2038, 12, 25),
                datetime.date(2038, 12, 25),
            ],
            dtype="object",
        ),
    }
    expected = pd.DataFrame.from_dict(data)

    inputfile = os.path.join(dirpath, "TestOrcFile.testDate2038.orc")
    got = read_orc(inputfile).iloc[:10]

    tm.assert_equal(expected, got)


def test_orc_reader_snappy_compressed(dirpath):
    data = {
        "int1": np.array(
            [
                -1160101563,
                1181413113,
                2065821249,
                -267157795,
                172111193,
                1752363137,
                1406072123,
                1911809390,
                -1308542224,
                -467100286,
            ],
            dtype="int32",
        ),
        "string1": np.array(
            [
                "f50dcb8",
                "382fdaaa",
                "90758c6",
                "9e8caf3f",
                "ee97332b",
                "d634da1",
                "2bea4396",
                "d67d89e8",
                "ad71007e",
                "e8c82066",
            ],
            dtype="object",
        ),
    }
    expected = pd.DataFrame.from_dict(data)

    inputfile = os.path.join(dirpath, "TestOrcFile.testSnappy.orc")
    got = read_orc(inputfile).iloc[:10]

    tm.assert_equal(expected, got)


def test_orc_roundtrip_file(dirpath):
    # GH44554
    # PyArrow gained ORC write support with the current argument order
    pytest.importorskip("pyarrow")

    data = {
        "boolean1": np.array([False, True], dtype="bool"),
        "byte1": np.array([1, 100], dtype="int8"),
        "short1": np.array([1024, 2048], dtype="int16"),
        "int1": np.array([65536, 65536], dtype="int32"),
        "long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"),
        "float1": np.array([1.0, 2.0], dtype="float32"),
        "double1": np.array([-15.0, -5.0], dtype="float64"),
        "bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"),
        "string1": np.array(["hi", "bye"], dtype="object"),
    }
    expected = pd.DataFrame.from_dict(data)

    with tm.ensure_clean() as path:
        expected.to_orc(path)
        got = read_orc(path)

    tm.assert_equal(expected, got)


def test_orc_roundtrip_bytesio():
    # GH44554
    # PyArrow gained ORC write support with the current argument order
    pytest.importorskip("pyarrow")

    data = {
        "boolean1": np.array([False, True], dtype="bool"),
        "byte1": np.array([1, 100], dtype="int8"),
        "short1": np.array([1024, 2048], dtype="int16"),
        "int1": np.array([65536, 65536], dtype="int32"),
        "long1": np.array([9223372036854775807, 9223372036854775807], dtype="int64"),
        "float1": np.array([1.0, 2.0], dtype="float32"),
        "double1": np.array([-15.0, -5.0], dtype="float64"),
        "bytes1": np.array([b"\x00\x01\x02\x03\x04", b""], dtype="object"),
        "string1": np.array(["hi", "bye"], dtype="object"),
    }
    expected = pd.DataFrame.from_dict(data)

    bytes = expected.to_orc()
    got = read_orc(BytesIO(bytes))

    tm.assert_equal(expected, got)

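# Illustrative sketch, not part of the diff above: to_orc with no path
# returns the serialized bytes, so an in-memory round trip needs only
# BytesIO (pyarrow must be installed, as the tests above enforce).
#
#   import pandas as pd
#   from io import BytesIO
#   df = pd.DataFrame({"a": [1, 2]})
#   assert pd.read_orc(BytesIO(df.to_orc())).equals(df)
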
def test_orc_writer_dtypes_not_supported(orc_writer_dtypes_not_supported):
    # GH44554
    # PyArrow gained ORC write support with the current argument order
    pytest.importorskip("pyarrow")

    msg = "The dtype of one or more columns is not supported yet."
    with pytest.raises(NotImplementedError, match=msg):
        orc_writer_dtypes_not_supported.to_orc()


def test_orc_dtype_backend_pyarrow():
    pytest.importorskip("pyarrow")
    df = pd.DataFrame(
        {
            "string": list("abc"),
            "string_with_nan": ["a", np.nan, "c"],
            "string_with_none": ["a", None, "c"],
            "bytes": [b"foo", b"bar", None],
            "int": list(range(1, 4)),
            "float": np.arange(4.0, 7.0, dtype="float64"),
            "float_with_nan": [2.0, np.nan, 3.0],
            "bool": [True, False, True],
            "bool_with_na": [True, False, None],
            "datetime": pd.date_range("20130101", periods=3),
            "datetime_with_nat": [
                pd.Timestamp("20130101"),
                pd.NaT,
                pd.Timestamp("20130103"),
            ],
        }
    )

    bytes_data = df.copy().to_orc()
    result = read_orc(BytesIO(bytes_data), dtype_backend="pyarrow")

    expected = pd.DataFrame(
        {
            col: pd.arrays.ArrowExtensionArray(pa.array(df[col], from_pandas=True))
            for col in df.columns
        }
    )

    tm.assert_frame_equal(result, expected)


def test_orc_dtype_backend_numpy_nullable():
    # GH#50503
    pytest.importorskip("pyarrow")
    df = pd.DataFrame(
        {
            "string": list("abc"),
            "string_with_nan": ["a", np.nan, "c"],
            "string_with_none": ["a", None, "c"],
            "int": list(range(1, 4)),
            "int_with_nan": pd.Series([1, pd.NA, 3], dtype="Int64"),
            "na_only": pd.Series([pd.NA, pd.NA, pd.NA], dtype="Int64"),
            "float": np.arange(4.0, 7.0, dtype="float64"),
            "float_with_nan": [2.0, np.nan, 3.0],
            "bool": [True, False, True],
            "bool_with_na": [True, False, None],
        }
    )

    bytes_data = df.copy().to_orc()
    result = read_orc(BytesIO(bytes_data), dtype_backend="numpy_nullable")

    expected = pd.DataFrame(
        {
            "string": StringArray(np.array(["a", "b", "c"], dtype=np.object_)),
            "string_with_nan": StringArray(
                np.array(["a", pd.NA, "c"], dtype=np.object_)
            ),
            "string_with_none": StringArray(
                np.array(["a", pd.NA, "c"], dtype=np.object_)
            ),
            "int": pd.Series([1, 2, 3], dtype="Int64"),
            "int_with_nan": pd.Series([1, pd.NA, 3], dtype="Int64"),
            "na_only": pd.Series([pd.NA, pd.NA, pd.NA], dtype="Int64"),
            "float": pd.Series([4.0, 5.0, 6.0], dtype="Float64"),
            "float_with_nan": pd.Series([2.0, pd.NA, 3.0], dtype="Float64"),
            "bool": pd.Series([True, False, True], dtype="boolean"),
            "bool_with_na": pd.Series([True, False, pd.NA], dtype="boolean"),
        }
    )

    tm.assert_frame_equal(result, expected)


def test_orc_uri_path():
    expected = pd.DataFrame({"int": list(range(1, 4))})
    with tm.ensure_clean("tmp.orc") as path:
        expected.to_orc(path)
        uri = pathlib.Path(path).as_uri()
        result = read_orc(uri)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "index",
    [
        pd.RangeIndex(start=2, stop=5, step=1),
        pd.RangeIndex(start=0, stop=3, step=1, name="non-default"),
        pd.Index([1, 2, 3]),
    ],
)
def test_to_orc_non_default_index(index):
    df = pd.DataFrame({"a": [1, 2, 3]}, index=index)
    msg = (
        "orc does not support serializing a non-default index|"
        "orc does not serialize index meta-data"
    )
    with pytest.raises(ValueError, match=msg):
        df.to_orc()


def test_invalid_dtype_backend():
    msg = (
        "dtype_backend numpy is invalid, only 'numpy_nullable' and "
        "'pyarrow' are allowed."
    )
    df = pd.DataFrame({"int": list(range(1, 4))})
    with tm.ensure_clean("tmp.orc") as path:
        df.to_orc(path)
        with pytest.raises(ValueError, match=msg):
            read_orc(path, dtype_backend="numpy")


def test_string_inference(tmp_path):
    # GH#54431
    path = tmp_path / "test_string_inference.p"
    df = pd.DataFrame(data={"a": ["x", "y"]})
    df.to_orc(path)
    with pd.option_context("future.infer_string", True):
        result = read_orc(path)
    expected = pd.DataFrame(
        data={"a": ["x", "y"]},
        dtype="string[pyarrow_numpy]",
        columns=pd.Index(["a"], dtype="string[pyarrow_numpy]"),
    )
    tm.assert_frame_equal(result, expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_parquet.py
ADDED
@@ -0,0 +1,1424 @@
""" test parquet compat """
import datetime
from decimal import Decimal
from io import BytesIO
import os
import pathlib

import numpy as np
import pytest

from pandas._config import using_copy_on_write
from pandas._config.config import _get_option

from pandas.compat import is_platform_windows
from pandas.compat.pyarrow import (
    pa_version_under11p0,
    pa_version_under13p0,
    pa_version_under15p0,
)

import pandas as pd
import pandas._testing as tm
from pandas.util.version import Version

from pandas.io.parquet import (
    FastParquetImpl,
    PyArrowImpl,
    get_engine,
    read_parquet,
    to_parquet,
)

try:
    import pyarrow

    _HAVE_PYARROW = True
except ImportError:
    _HAVE_PYARROW = False

try:
    import fastparquet

    _HAVE_FASTPARQUET = True
except ImportError:
    _HAVE_FASTPARQUET = False


# TODO(ArrayManager) fastparquet relies on BlockManager internals

pytestmark = [
    pytest.mark.filterwarnings("ignore:DataFrame._data is deprecated:FutureWarning"),
    pytest.mark.filterwarnings(
        "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
    ),
]


# setup engines & skips
@pytest.fixture(
    params=[
        pytest.param(
            "fastparquet",
            marks=pytest.mark.skipif(
                not _HAVE_FASTPARQUET
                or _get_option("mode.data_manager", silent=True) == "array",
                reason="fastparquet is not installed or ArrayManager is used",
            ),
        ),
        pytest.param(
            "pyarrow",
            marks=pytest.mark.skipif(
                not _HAVE_PYARROW, reason="pyarrow is not installed"
            ),
        ),
    ]
)
def engine(request):
    return request.param


@pytest.fixture
def pa():
    if not _HAVE_PYARROW:
        pytest.skip("pyarrow is not installed")
    return "pyarrow"


@pytest.fixture
def fp():
    if not _HAVE_FASTPARQUET:
        pytest.skip("fastparquet is not installed")
    elif _get_option("mode.data_manager", silent=True) == "array":
        pytest.skip("ArrayManager is not supported with fastparquet")
    return "fastparquet"


@pytest.fixture
def df_compat():
    return pd.DataFrame({"A": [1, 2, 3], "B": "foo"})


@pytest.fixture
def df_cross_compat():
    df = pd.DataFrame(
        {
            "a": list("abc"),
            "b": list(range(1, 4)),
            # 'c': np.arange(3, 6).astype('u1'),
            "d": np.arange(4.0, 7.0, dtype="float64"),
            "e": [True, False, True],
            "f": pd.date_range("20130101", periods=3),
            # 'g': pd.date_range('20130101', periods=3,
            #                    tz='US/Eastern'),
            # 'h': pd.date_range('20130101', periods=3, freq='ns')
        }
    )
    return df


@pytest.fixture
def df_full():
    return pd.DataFrame(
        {
            "string": list("abc"),
            "string_with_nan": ["a", np.nan, "c"],
            "string_with_none": ["a", None, "c"],
            "bytes": [b"foo", b"bar", b"baz"],
            "unicode": ["foo", "bar", "baz"],
            "int": list(range(1, 4)),
            "uint": np.arange(3, 6).astype("u1"),
            "float": np.arange(4.0, 7.0, dtype="float64"),
            "float_with_nan": [2.0, np.nan, 3.0],
            "bool": [True, False, True],
            "datetime": pd.date_range("20130101", periods=3),
            "datetime_with_nat": [
                pd.Timestamp("20130101"),
                pd.NaT,
                pd.Timestamp("20130103"),
            ],
        }
    )


@pytest.fixture(
    params=[
        datetime.datetime.now(datetime.timezone.utc),
        datetime.datetime.now(datetime.timezone.min),
        datetime.datetime.now(datetime.timezone.max),
        datetime.datetime.strptime("2019-01-04T16:41:24+0200", "%Y-%m-%dT%H:%M:%S%z"),
        datetime.datetime.strptime("2019-01-04T16:41:24+0215", "%Y-%m-%dT%H:%M:%S%z"),
        datetime.datetime.strptime("2019-01-04T16:41:24-0200", "%Y-%m-%dT%H:%M:%S%z"),
        datetime.datetime.strptime("2019-01-04T16:41:24-0215", "%Y-%m-%dT%H:%M:%S%z"),
    ]
)
def timezone_aware_date_list(request):
    return request.param


def check_round_trip(
    df,
    engine=None,
    path=None,
    write_kwargs=None,
    read_kwargs=None,
    expected=None,
    check_names=True,
    check_like=False,
    check_dtype=True,
    repeat=2,
):
    """Verify parquet serializer and deserializer produce the same results.

    Performs a pandas to disk and disk to pandas round trip,
    then compares the 2 resulting DataFrames to verify equality.

    Parameters
    ----------
    df: Dataframe
    engine: str, optional
        'pyarrow' or 'fastparquet'
    path: str, optional
    write_kwargs: dict of str:str, optional
    read_kwargs: dict of str:str, optional
    expected: DataFrame, optional
        Expected deserialization result, otherwise will be equal to `df`
    check_names: list of str, optional
        Closed set of column names to be compared
    check_like: bool, optional
        If True, ignore the order of index & columns.
    repeat: int, optional
        How many times to repeat the test
    """
    write_kwargs = write_kwargs or {"compression": None}
    read_kwargs = read_kwargs or {}

    if expected is None:
        expected = df

    if engine:
        write_kwargs["engine"] = engine
        read_kwargs["engine"] = engine

    def compare(repeat):
        for _ in range(repeat):
            df.to_parquet(path, **write_kwargs)
            actual = read_parquet(path, **read_kwargs)

            if "string_with_nan" in expected:
                expected.loc[1, "string_with_nan"] = None
            tm.assert_frame_equal(
                expected,
                actual,
                check_names=check_names,
                check_like=check_like,
                check_dtype=check_dtype,
            )

    if path is None:
        with tm.ensure_clean() as path:
            compare(repeat)
    else:
        compare(repeat)

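# Illustrative sketch, not part of the diff above: a typical call to the
# helper defined above exercises one engine and a temporary file in a line.
#
#   check_round_trip(pd.DataFrame({"a": [1, 2, 3]}), engine="pyarrow")
#   # writes with to_parquet, reads back with read_parquet, and compares,
#   # repeating twice to catch state leaking between writes.
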
def check_partition_names(path, expected):
    """Check partitions of a parquet file are as expected.

    Parameters
    ----------
    path: str
        Path of the dataset.
    expected: iterable of str
        Expected partition names.
    """
    import pyarrow.dataset as ds

    dataset = ds.dataset(path, partitioning="hive")
    assert dataset.partitioning.schema.names == expected


def test_invalid_engine(df_compat):
    msg = "engine must be one of 'pyarrow', 'fastparquet'"
    with pytest.raises(ValueError, match=msg):
        check_round_trip(df_compat, "foo", "bar")


def test_options_py(df_compat, pa):
    # use the set option

    with pd.option_context("io.parquet.engine", "pyarrow"):
        check_round_trip(df_compat)


def test_options_fp(df_compat, fp):
    # use the set option

    with pd.option_context("io.parquet.engine", "fastparquet"):
        check_round_trip(df_compat)


def test_options_auto(df_compat, fp, pa):
    # use the set option

    with pd.option_context("io.parquet.engine", "auto"):
        check_round_trip(df_compat)


def test_options_get_engine(fp, pa):
    assert isinstance(get_engine("pyarrow"), PyArrowImpl)
    assert isinstance(get_engine("fastparquet"), FastParquetImpl)

    with pd.option_context("io.parquet.engine", "pyarrow"):
        assert isinstance(get_engine("auto"), PyArrowImpl)
        assert isinstance(get_engine("pyarrow"), PyArrowImpl)
        assert isinstance(get_engine("fastparquet"), FastParquetImpl)

    with pd.option_context("io.parquet.engine", "fastparquet"):
        assert isinstance(get_engine("auto"), FastParquetImpl)
        assert isinstance(get_engine("pyarrow"), PyArrowImpl)
        assert isinstance(get_engine("fastparquet"), FastParquetImpl)

    with pd.option_context("io.parquet.engine", "auto"):
        assert isinstance(get_engine("auto"), PyArrowImpl)
        assert isinstance(get_engine("pyarrow"), PyArrowImpl)
        assert isinstance(get_engine("fastparquet"), FastParquetImpl)

288 |
+
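

# As the assertions above encode: with io.parquet.engine="auto" (the default),
# get_engine prefers pyarrow and falls back to fastparquet only when pyarrow
# is unavailable; an explicit engine name always wins over the option.
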
def test_get_engine_auto_error_message():
    # Expect different error messages from get_engine(engine="auto")
    # if engines aren't installed vs. are installed but bad version
    from pandas.compat._optional import VERSIONS

    # Do we have engines installed, but a bad version of them?
    pa_min_ver = VERSIONS.get("pyarrow")
    fp_min_ver = VERSIONS.get("fastparquet")
    have_pa_bad_version = (
        False
        if not _HAVE_PYARROW
        else Version(pyarrow.__version__) < Version(pa_min_ver)
    )
    have_fp_bad_version = (
        False
        if not _HAVE_FASTPARQUET
        else Version(fastparquet.__version__) < Version(fp_min_ver)
    )
    # Do we have usable engines installed?
    have_usable_pa = _HAVE_PYARROW and not have_pa_bad_version
    have_usable_fp = _HAVE_FASTPARQUET and not have_fp_bad_version

    if not have_usable_pa and not have_usable_fp:
        # No usable engines found.
        if have_pa_bad_version:
            match = f"Pandas requires version .{pa_min_ver}. or newer of .pyarrow."
            with pytest.raises(ImportError, match=match):
                get_engine("auto")
        else:
            match = "Missing optional dependency .pyarrow."
            with pytest.raises(ImportError, match=match):
                get_engine("auto")

        if have_fp_bad_version:
            match = f"Pandas requires version .{fp_min_ver}. or newer of .fastparquet."
            with pytest.raises(ImportError, match=match):
                get_engine("auto")
        else:
            match = "Missing optional dependency .fastparquet."
            with pytest.raises(ImportError, match=match):
                get_engine("auto")


def test_cross_engine_pa_fp(df_cross_compat, pa, fp):
    # cross-compat with differing reading/writing engines

    df = df_cross_compat
    with tm.ensure_clean() as path:
        df.to_parquet(path, engine=pa, compression=None)

        result = read_parquet(path, engine=fp)
        tm.assert_frame_equal(result, df)

        result = read_parquet(path, engine=fp, columns=["a", "d"])
        tm.assert_frame_equal(result, df[["a", "d"]])


def test_cross_engine_fp_pa(df_cross_compat, pa, fp):
    # cross-compat with differing reading/writing engines
    df = df_cross_compat
    with tm.ensure_clean() as path:
        df.to_parquet(path, engine=fp, compression=None)

        result = read_parquet(path, engine=pa)
        tm.assert_frame_equal(result, df)

        result = read_parquet(path, engine=pa, columns=["a", "d"])
        tm.assert_frame_equal(result, df[["a", "d"]])


def test_parquet_pos_args_deprecation(engine):
    # GH-54229
    df = pd.DataFrame({"a": [1, 2, 3]})
    msg = (
        r"Starting with pandas version 3.0 all arguments of to_parquet except for the "
        r"argument 'path' will be keyword-only."
    )
    with tm.ensure_clean() as path:
        with tm.assert_produces_warning(
            FutureWarning,
            match=msg,
            check_stacklevel=False,
            raise_on_extra_warnings=False,
        ):
            df.to_parquet(path, engine)


class Base:
    def check_error_on_write(self, df, engine, exc, err_msg):
        # check that we are raising the exception on writing
        with tm.ensure_clean() as path:
            with pytest.raises(exc, match=err_msg):
                to_parquet(df, path, engine, compression=None)

    def check_external_error_on_write(self, df, engine, exc):
        # check that an external library is raising the exception on writing
        with tm.ensure_clean() as path:
            with tm.external_error_raised(exc):
                to_parquet(df, path, engine, compression=None)

    @pytest.mark.network
    @pytest.mark.single_cpu
    def test_parquet_read_from_url(self, httpserver, datapath, df_compat, engine):
        if engine != "auto":
            pytest.importorskip(engine)
        with open(datapath("io", "data", "parquet", "simple.parquet"), mode="rb") as f:
            httpserver.serve_content(content=f.read())
            df = read_parquet(httpserver.url)
        tm.assert_frame_equal(df, df_compat)


class TestBasic(Base):
    def test_error(self, engine):
        for obj in [
            pd.Series([1, 2, 3]),
            1,
            "foo",
            pd.Timestamp("20130101"),
            np.array([1, 2, 3]),
        ]:
            msg = "to_parquet only supports IO with DataFrames"
            self.check_error_on_write(obj, engine, ValueError, msg)

    def test_columns_dtypes(self, engine):
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})

        # unicode
        df.columns = ["foo", "bar"]
        check_round_trip(df, engine)

    @pytest.mark.parametrize("compression", [None, "gzip", "snappy", "brotli"])
    def test_compression(self, engine, compression):
        df = pd.DataFrame({"A": [1, 2, 3]})
        check_round_trip(df, engine, write_kwargs={"compression": compression})

    def test_read_columns(self, engine):
        # GH18154
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})

        expected = pd.DataFrame({"string": list("abc")})
        check_round_trip(
            df, engine, expected=expected, read_kwargs={"columns": ["string"]}
        )

    def test_read_filters(self, engine, tmp_path):
        df = pd.DataFrame(
            {
                "int": list(range(4)),
                "part": list("aabb"),
            }
        )

        expected = pd.DataFrame({"int": [0, 1]})
        check_round_trip(
            df,
            engine,
            path=tmp_path,
            expected=expected,
            write_kwargs={"partition_cols": ["part"]},
            read_kwargs={"filters": [("part", "==", "a")], "columns": ["int"]},
            repeat=1,
        )

    def test_write_index(self, engine, using_copy_on_write, request):
        check_names = engine != "fastparquet"
        if using_copy_on_write and engine == "fastparquet":
            request.applymarker(
                pytest.mark.xfail(reason="fastparquet write into index")
            )

        df = pd.DataFrame({"A": [1, 2, 3]})
        check_round_trip(df, engine)

        indexes = [
            [2, 3, 4],
            pd.date_range("20130101", periods=3),
            list("abc"),
            [1, 3, 4],
        ]
        # non-default index
        for index in indexes:
            df.index = index
            if isinstance(index, pd.DatetimeIndex):
                df.index = df.index._with_freq(None)  # freq doesn't round-trip
            check_round_trip(df, engine, check_names=check_names)

        # index with meta-data
        df.index = [0, 1, 2]
        df.index.name = "foo"
        check_round_trip(df, engine)

    def test_write_multiindex(self, pa):
        # Not supported in fastparquet as of 0.1.3 or older pyarrow version
        engine = pa

        df = pd.DataFrame({"A": [1, 2, 3]})
        index = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
        df.index = index
        check_round_trip(df, engine)

    def test_multiindex_with_columns(self, pa):
        engine = pa
        dates = pd.date_range("01-Jan-2018", "01-Dec-2018", freq="MS")
        df = pd.DataFrame(
            np.random.default_rng(2).standard_normal((2 * len(dates), 3)),
            columns=list("ABC"),
        )
        index1 = pd.MultiIndex.from_product(
            [["Level1", "Level2"], dates], names=["level", "date"]
        )
        index2 = index1.copy(names=None)
        for index in [index1, index2]:
            df.index = index

            check_round_trip(df, engine)
            check_round_trip(
                df, engine, read_kwargs={"columns": ["A", "B"]}, expected=df[["A", "B"]]
            )

    def test_write_ignoring_index(self, engine):
        # ENH 20768
        # Ensure index=False omits the index from the written Parquet file.
        df = pd.DataFrame({"a": [1, 2, 3], "b": ["q", "r", "s"]})

        write_kwargs = {"compression": None, "index": False}

        # Because we're dropping the index, we expect the loaded dataframe to
        # have the default integer index.
        expected = df.reset_index(drop=True)

        check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)

        # Ignore custom index
        df = pd.DataFrame(
            {"a": [1, 2, 3], "b": ["q", "r", "s"]}, index=["zyx", "wvu", "tsr"]
        )

        check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)

        # Ignore multi-indexes as well.
        arrays = [
            ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
            ["one", "two", "one", "two", "one", "two", "one", "two"],
        ]
        df = pd.DataFrame(
            {"one": list(range(8)), "two": [-i for i in range(8)]}, index=arrays
        )

        expected = df.reset_index(drop=True)
        check_round_trip(df, engine, write_kwargs=write_kwargs, expected=expected)

    def test_write_column_multiindex(self, engine):
        # Not able to write column multi-indexes with non-string column names.
        mi_columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)])
        df = pd.DataFrame(
            np.random.default_rng(2).standard_normal((4, 3)), columns=mi_columns
        )

        if engine == "fastparquet":
            self.check_error_on_write(
                df, engine, TypeError, "Column name must be a string"
            )
        elif engine == "pyarrow":
            check_round_trip(df, engine)

    def test_write_column_multiindex_nonstring(self, engine):
        # GH #34777

        # Not able to write column multi-indexes with non-string column names
        arrays = [
            ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
            [1, 2, 1, 2, 1, 2, 1, 2],
        ]
        df = pd.DataFrame(
            np.random.default_rng(2).standard_normal((8, 8)), columns=arrays
        )
        df.columns.names = ["Level1", "Level2"]
        if engine == "fastparquet":
            self.check_error_on_write(df, engine, ValueError, "Column name")
        elif engine == "pyarrow":
            check_round_trip(df, engine)

    def test_write_column_multiindex_string(self, pa):
        # GH #34777
        # Not supported in fastparquet as of 0.1.3
        engine = pa

        # Write column multi-indexes with string column names
        arrays = [
            ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
            ["one", "two", "one", "two", "one", "two", "one", "two"],
        ]
        df = pd.DataFrame(
            np.random.default_rng(2).standard_normal((8, 8)), columns=arrays
        )
        df.columns.names = ["ColLevel1", "ColLevel2"]

        check_round_trip(df, engine)

    def test_write_column_index_string(self, pa):
        # GH #34777
        # Not supported in fastparquet as of 0.1.3
        engine = pa

        # Write column indexes with string column names
        arrays = ["bar", "baz", "foo", "qux"]
        df = pd.DataFrame(
            np.random.default_rng(2).standard_normal((8, 4)), columns=arrays
        )
        df.columns.name = "StringCol"

        check_round_trip(df, engine)

    def test_write_column_index_nonstring(self, engine):
        # GH #34777

        # Write column indexes with non-string column names
        arrays = [1, 2, 3, 4]
        df = pd.DataFrame(
            np.random.default_rng(2).standard_normal((8, 4)), columns=arrays
        )
        df.columns.name = "NonStringCol"
        if engine == "fastparquet":
            self.check_error_on_write(
                df, engine, TypeError, "Column name must be a string"
            )
        else:
            check_round_trip(df, engine)

    def test_dtype_backend(self, engine, request):
        pq = pytest.importorskip("pyarrow.parquet")

        if engine == "fastparquet":
            # We are manually disabling fastparquet's
            # nullable dtype support pending discussion
            mark = pytest.mark.xfail(
                reason="Fastparquet nullable dtype support is disabled"
            )
            request.applymarker(mark)

        table = pyarrow.table(
            {
                "a": pyarrow.array([1, 2, 3, None], "int64"),
                "b": pyarrow.array([1, 2, 3, None], "uint8"),
                "c": pyarrow.array(["a", "b", "c", None]),
                "d": pyarrow.array([True, False, True, None]),
                # Test that nullable dtypes used even in absence of nulls
                "e": pyarrow.array([1, 2, 3, 4], "int64"),
                # GH 45694
                "f": pyarrow.array([1.0, 2.0, 3.0, None], "float32"),
                "g": pyarrow.array([1.0, 2.0, 3.0, None], "float64"),
            }
        )
        with tm.ensure_clean() as path:
            # write manually with pyarrow to write integers
            pq.write_table(table, path)
            result1 = read_parquet(path, engine=engine)
            result2 = read_parquet(path, engine=engine, dtype_backend="numpy_nullable")

        assert result1["a"].dtype == np.dtype("float64")
        expected = pd.DataFrame(
            {
                "a": pd.array([1, 2, 3, None], dtype="Int64"),
                "b": pd.array([1, 2, 3, None], dtype="UInt8"),
                "c": pd.array(["a", "b", "c", None], dtype="string"),
                "d": pd.array([True, False, True, None], dtype="boolean"),
                "e": pd.array([1, 2, 3, 4], dtype="Int64"),
                "f": pd.array([1.0, 2.0, 3.0, None], dtype="Float32"),
                "g": pd.array([1.0, 2.0, 3.0, None], dtype="Float64"),
            }
        )
        if engine == "fastparquet":
            # Fastparquet doesn't support string columns yet
            # Only int and boolean
            result2 = result2.drop("c", axis=1)
            expected = expected.drop("c", axis=1)
        tm.assert_frame_equal(result2, expected)

    @pytest.mark.parametrize(
        "dtype",
        [
            "Int64",
            "UInt8",
            "boolean",
            "object",
            "datetime64[ns, UTC]",
            "float",
            "period[D]",
            "Float64",
            "string",
        ],
    )
    def test_read_empty_array(self, pa, dtype):
        # GH #41241
        df = pd.DataFrame(
            {
                "value": pd.array([], dtype=dtype),
            }
        )
        # GH 45694
        expected = None
        if dtype == "float":
            expected = pd.DataFrame(
                {
                    "value": pd.array([], dtype="Float64"),
                }
            )
        check_round_trip(
            df, pa, read_kwargs={"dtype_backend": "numpy_nullable"}, expected=expected
        )


class TestParquetPyArrow(Base):
    def test_basic(self, pa, df_full):
        df = df_full

        # additional supported types for pyarrow
        dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
        dti = dti._with_freq(None)  # freq doesn't round-trip
        df["datetime_tz"] = dti
        df["bool_with_none"] = [True, None, True]

        check_round_trip(df, pa)

    def test_basic_subset_columns(self, pa, df_full):
        # GH18628

        df = df_full
        # additional supported types for pyarrow
        df["datetime_tz"] = pd.date_range("20130101", periods=3, tz="Europe/Brussels")

        check_round_trip(
            df,
            pa,
            expected=df[["string", "int"]],
            read_kwargs={"columns": ["string", "int"]},
        )

    def test_to_bytes_without_path_or_buf_provided(self, pa, df_full):
        # GH 37105
        buf_bytes = df_full.to_parquet(engine=pa)
        assert isinstance(buf_bytes, bytes)

        buf_stream = BytesIO(buf_bytes)
        res = read_parquet(buf_stream)

        expected = df_full.copy()
        expected.loc[1, "string_with_nan"] = None
        tm.assert_frame_equal(res, expected)

    def test_duplicate_columns(self, pa):
        # not currently able to handle duplicate columns
        df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
        self.check_error_on_write(df, pa, ValueError, "Duplicate column names found")

    def test_timedelta(self, pa):
        df = pd.DataFrame({"a": pd.timedelta_range("1 day", periods=3)})
        check_round_trip(df, pa)

    def test_unsupported(self, pa):
        # mixed python objects
        df = pd.DataFrame({"a": ["a", 1, 2.0]})
        # pyarrow 0.11 raises ArrowTypeError
        # older pyarrows raise ArrowInvalid
        self.check_external_error_on_write(df, pa, pyarrow.ArrowException)

    def test_unsupported_float16(self, pa):
        # #44847, #44914
        # Not able to write float 16 column using pyarrow.
        data = np.arange(2, 10, dtype=np.float16)
        df = pd.DataFrame(data=data, columns=["fp16"])
        if pa_version_under15p0:
            self.check_external_error_on_write(df, pa, pyarrow.ArrowException)
        else:
            check_round_trip(df, pa)
    @pytest.mark.xfail(
        is_platform_windows(),
        reason=(
            "PyArrow does not clean up partial file dumps when unsupported "
            "dtypes are passed to the to_parquet function on Windows"
        ),
    )
    @pytest.mark.skipif(not pa_version_under15p0, reason="float16 works on 15")
    @pytest.mark.parametrize("path_type", [str, pathlib.Path])
    def test_unsupported_float16_cleanup(self, pa, path_type):
        # #44847, #44914
        # Not able to write float 16 column using pyarrow.
        # Tests cleanup by pyarrow in case of an error
        data = np.arange(2, 10, dtype=np.float16)
        df = pd.DataFrame(data=data, columns=["fp16"])

        with tm.ensure_clean() as path_str:
            path = path_type(path_str)
            with tm.external_error_raised(pyarrow.ArrowException):
                df.to_parquet(path=path, engine=pa)
            assert not os.path.isfile(path)

    def test_categorical(self, pa):
        # supported in >= 0.7.0
        df = pd.DataFrame()
        df["a"] = pd.Categorical(list("abcdef"))

        # test for null, out-of-order values, and unobserved category
        df["b"] = pd.Categorical(
            ["bar", "foo", "foo", "bar", None, "bar"],
            dtype=pd.CategoricalDtype(["foo", "bar", "baz"]),
        )

        # test for ordered flag
        df["c"] = pd.Categorical(
            ["a", "b", "c", "a", "c", "b"], categories=["b", "c", "d"], ordered=True
        )

        check_round_trip(df, pa)

    @pytest.mark.single_cpu
    def test_s3_roundtrip_explicit_fs(self, df_compat, s3_public_bucket, pa, s3so):
        s3fs = pytest.importorskip("s3fs")
        s3 = s3fs.S3FileSystem(**s3so)
        kw = {"filesystem": s3}
        check_round_trip(
            df_compat,
            pa,
            path=f"{s3_public_bucket.name}/pyarrow.parquet",
            read_kwargs=kw,
            write_kwargs=kw,
        )

    @pytest.mark.single_cpu
    def test_s3_roundtrip(self, df_compat, s3_public_bucket, pa, s3so):
        # GH #19134
        s3so = {"storage_options": s3so}
        check_round_trip(
            df_compat,
            pa,
            path=f"s3://{s3_public_bucket.name}/pyarrow.parquet",
            read_kwargs=s3so,
            write_kwargs=s3so,
        )

    @pytest.mark.single_cpu
    @pytest.mark.parametrize(
        "partition_col",
        [
            ["A"],
            [],
        ],
    )
    def test_s3_roundtrip_for_dir(
        self, df_compat, s3_public_bucket, pa, partition_col, s3so
    ):
        pytest.importorskip("s3fs")
        # GH #26388
        expected_df = df_compat.copy()

        # GH #35791
        if partition_col:
            expected_df = expected_df.astype(dict.fromkeys(partition_col, np.int32))
            partition_col_type = "category"

            expected_df[partition_col] = expected_df[partition_col].astype(
                partition_col_type
            )

        check_round_trip(
            df_compat,
            pa,
            expected=expected_df,
            path=f"s3://{s3_public_bucket.name}/parquet_dir",
            read_kwargs={"storage_options": s3so},
            write_kwargs={
                "partition_cols": partition_col,
                "compression": None,
                "storage_options": s3so,
            },
            check_like=True,
            repeat=1,
        )

    def test_read_file_like_obj_support(self, df_compat):
        pytest.importorskip("pyarrow")
        buffer = BytesIO()
        df_compat.to_parquet(buffer)
        df_from_buf = read_parquet(buffer)
        tm.assert_frame_equal(df_compat, df_from_buf)

    def test_expand_user(self, df_compat, monkeypatch):
        pytest.importorskip("pyarrow")
        monkeypatch.setenv("HOME", "TestingUser")
        monkeypatch.setenv("USERPROFILE", "TestingUser")
        with pytest.raises(OSError, match=r".*TestingUser.*"):
            read_parquet("~/file.parquet")
        with pytest.raises(OSError, match=r".*TestingUser.*"):
            df_compat.to_parquet("~/file.parquet")

    def test_partition_cols_supported(self, tmp_path, pa, df_full):
        # GH #23283
        partition_cols = ["bool", "int"]
        df = df_full
        df.to_parquet(tmp_path, partition_cols=partition_cols, compression=None)
        check_partition_names(tmp_path, partition_cols)
        assert read_parquet(tmp_path).shape == df.shape

    def test_partition_cols_string(self, tmp_path, pa, df_full):
        # GH #27117
        partition_cols = "bool"
        partition_cols_list = [partition_cols]
        df = df_full
        df.to_parquet(tmp_path, partition_cols=partition_cols, compression=None)
        check_partition_names(tmp_path, partition_cols_list)
        assert read_parquet(tmp_path).shape == df.shape

    @pytest.mark.parametrize(
        "path_type", [str, lambda x: x], ids=["string", "pathlib.Path"]
    )
    def test_partition_cols_pathlib(self, tmp_path, pa, df_compat, path_type):
        # GH 35902

        partition_cols = "B"
        partition_cols_list = [partition_cols]
        df = df_compat

        path = path_type(tmp_path)
        df.to_parquet(path, partition_cols=partition_cols_list)
        assert read_parquet(path).shape == df.shape

    def test_empty_dataframe(self, pa):
        # GH #27339
        df = pd.DataFrame(index=[], columns=[])
        check_round_trip(df, pa)

    def test_write_with_schema(self, pa):
        import pyarrow

        df = pd.DataFrame({"x": [0, 1]})
        schema = pyarrow.schema([pyarrow.field("x", type=pyarrow.bool_())])
        out_df = df.astype(bool)
        check_round_trip(df, pa, write_kwargs={"schema": schema}, expected=out_df)
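
    # Note (illustrative): passing a pyarrow schema via write_kwargs makes the
    # writer cast on write, so the integer frame above round-trips as bool --
    # hence expected=out_df rather than df.
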
    def test_additional_extension_arrays(self, pa):
        # test additional ExtensionArrays that are supported through the
        # __arrow_array__ protocol
        pytest.importorskip("pyarrow")
        df = pd.DataFrame(
            {
                "a": pd.Series([1, 2, 3], dtype="Int64"),
                "b": pd.Series([1, 2, 3], dtype="UInt32"),
                "c": pd.Series(["a", None, "c"], dtype="string"),
            }
        )
        check_round_trip(df, pa)

        df = pd.DataFrame({"a": pd.Series([1, 2, 3, None], dtype="Int64")})
        check_round_trip(df, pa)

    def test_pyarrow_backed_string_array(self, pa, string_storage):
        # test ArrowStringArray supported through the __arrow_array__ protocol
        pytest.importorskip("pyarrow")
        df = pd.DataFrame({"a": pd.Series(["a", None, "c"], dtype="string[pyarrow]")})
        with pd.option_context("string_storage", string_storage):
            check_round_trip(df, pa, expected=df.astype(f"string[{string_storage}]"))

    def test_additional_extension_types(self, pa):
        # test additional ExtensionArrays that are supported through the
        # __arrow_array__ protocol + by defining a custom ExtensionType
        pytest.importorskip("pyarrow")
        df = pd.DataFrame(
            {
                "c": pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (3, 4)]),
                "d": pd.period_range("2012-01-01", periods=3, freq="D"),
                # GH-45881 issue with interval with datetime64[ns] subtype
                "e": pd.IntervalIndex.from_breaks(
                    pd.date_range("2012-01-01", periods=4, freq="D")
                ),
            }
        )
        check_round_trip(df, pa)

    def test_timestamp_nanoseconds(self, pa):
        # with version 2.6, pyarrow defaults to writing the nanoseconds, so
        # this should work without error
        # Note in previous pyarrows(<7.0.0), only the pseudo-version 2.0 was available
        ver = "2.6"
        df = pd.DataFrame({"a": pd.date_range("2017-01-01", freq="1ns", periods=10)})
        check_round_trip(df, pa, write_kwargs={"version": ver})

    def test_timezone_aware_index(self, request, pa, timezone_aware_date_list):
        if timezone_aware_date_list.tzinfo != datetime.timezone.utc:
            request.applymarker(
                pytest.mark.xfail(
                    reason="temporary skip this test until it is properly resolved: "
                    "https://github.com/pandas-dev/pandas/issues/37286"
                )
            )
        idx = 5 * [timezone_aware_date_list]
        df = pd.DataFrame(index=idx, data={"index_as_col": idx})

        # see gh-36004
        # compare time(zone) values only, skip their class:
        # pyarrow always creates fixed offset timezones using pytz.FixedOffset()
        # even if it was datetime.timezone() originally
        #
        # technically they are the same:
        # they both implement datetime.tzinfo
        # they both wrap datetime.timedelta()
        # this use-case sets the resolution to 1 minute
        check_round_trip(df, pa, check_dtype=False)

    def test_filter_row_groups(self, pa):
        # https://github.com/pandas-dev/pandas/issues/26551
        pytest.importorskip("pyarrow")
        df = pd.DataFrame({"a": list(range(3))})
        with tm.ensure_clean() as path:
            df.to_parquet(path, engine=pa)
            result = read_parquet(path, pa, filters=[("a", "==", 0)])
        assert len(result) == 1

    def test_read_parquet_manager(self, pa, using_array_manager):
        # ensure that read_parquet honors the pandas.options.mode.data_manager option
        df = pd.DataFrame(
            np.random.default_rng(2).standard_normal((10, 3)), columns=["A", "B", "C"]
        )

        with tm.ensure_clean() as path:
            df.to_parquet(path, engine=pa)
            result = read_parquet(path, pa)
        if using_array_manager:
            assert isinstance(result._mgr, pd.core.internals.ArrayManager)
        else:
            assert isinstance(result._mgr, pd.core.internals.BlockManager)

    def test_read_dtype_backend_pyarrow_config(self, pa, df_full):
        import pyarrow

        df = df_full

        # additional supported types for pyarrow
        dti = pd.date_range("20130101", periods=3, tz="Europe/Brussels")
        dti = dti._with_freq(None)  # freq doesn't round-trip
        df["datetime_tz"] = dti
        df["bool_with_none"] = [True, None, True]

        pa_table = pyarrow.Table.from_pandas(df)
        expected = pa_table.to_pandas(types_mapper=pd.ArrowDtype)
        if pa_version_under13p0:
            # pyarrow infers datetimes as us instead of ns
            expected["datetime"] = expected["datetime"].astype("timestamp[us][pyarrow]")
            expected["datetime_with_nat"] = expected["datetime_with_nat"].astype(
                "timestamp[us][pyarrow]"
            )
            expected["datetime_tz"] = expected["datetime_tz"].astype(
                pd.ArrowDtype(pyarrow.timestamp(unit="us", tz="Europe/Brussels"))
            )

        check_round_trip(
            df,
            engine=pa,
            read_kwargs={"dtype_backend": "pyarrow"},
            expected=expected,
        )

    def test_read_dtype_backend_pyarrow_config_index(self, pa):
        df = pd.DataFrame(
            {"a": [1, 2]}, index=pd.Index([3, 4], name="test"), dtype="int64[pyarrow]"
        )
        expected = df.copy()
        import pyarrow

        if Version(pyarrow.__version__) > Version("11.0.0"):
            expected.index = expected.index.astype("int64[pyarrow]")
        check_round_trip(
            df,
            engine=pa,
            read_kwargs={"dtype_backend": "pyarrow"},
            expected=expected,
        )

    def test_columns_dtypes_not_invalid(self, pa):
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})

        # numeric
        df.columns = [0, 1]
        check_round_trip(df, pa)

        # bytes
        df.columns = [b"foo", b"bar"]
        with pytest.raises(NotImplementedError, match="|S3"):
            # Bytes fails on read_parquet
            check_round_trip(df, pa)

        # python object
        df.columns = [
            datetime.datetime(2011, 1, 1, 0, 0),
            datetime.datetime(2011, 1, 1, 1, 1),
        ]
        check_round_trip(df, pa)

    def test_empty_columns(self, pa):
        # GH 52034
        df = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name"))
        check_round_trip(df, pa)

    def test_df_attrs_persistence(self, tmp_path, pa):
        path = tmp_path / "test_df_metadata.p"
        df = pd.DataFrame(data={1: [1]})
        df.attrs = {"test_attribute": 1}
        df.to_parquet(path, engine=pa)
        new_df = read_parquet(path, engine=pa)
        assert new_df.attrs == df.attrs

    def test_string_inference(self, tmp_path, pa):
        # GH#54431
        path = tmp_path / "test_string_inference.p"
        df = pd.DataFrame(data={"a": ["x", "y"]}, index=["a", "b"])
        df.to_parquet(path, engine="pyarrow")
        with pd.option_context("future.infer_string", True):
            result = read_parquet(path, engine="pyarrow")
        expected = pd.DataFrame(
            data={"a": ["x", "y"]},
            dtype="string[pyarrow_numpy]",
            index=pd.Index(["a", "b"], dtype="string[pyarrow_numpy]"),
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.skipif(pa_version_under11p0, reason="not supported before 11.0")
    def test_roundtrip_decimal(self, tmp_path, pa):
        # GH#54768
        import pyarrow as pa

        path = tmp_path / "decimal.p"
        df = pd.DataFrame({"a": [Decimal("123.00")]}, dtype="string[pyarrow]")
        df.to_parquet(path, schema=pa.schema([("a", pa.decimal128(5))]))
        result = read_parquet(path)
        expected = pd.DataFrame({"a": ["123"]}, dtype="string[python]")
        tm.assert_frame_equal(result, expected)

    def test_infer_string_large_string_type(self, tmp_path, pa):
        # GH#54798
        import pyarrow as pa
        import pyarrow.parquet as pq

        path = tmp_path / "large_string.p"

        table = pa.table({"a": pa.array([None, "b", "c"], pa.large_string())})
        pq.write_table(table, path)

        with pd.option_context("future.infer_string", True):
            result = read_parquet(path)
        expected = pd.DataFrame(
            data={"a": [None, "b", "c"]},
            dtype="string[pyarrow_numpy]",
            columns=pd.Index(["a"], dtype="string[pyarrow_numpy]"),
        )
        tm.assert_frame_equal(result, expected)

    # NOTE: this test is not run by default, because it requires a lot of memory (>5GB)
    # @pytest.mark.slow
    # def test_string_column_above_2GB(self, tmp_path, pa):
    #     # https://github.com/pandas-dev/pandas/issues/55606
    #     # above 2GB of string data
    #     v1 = b"x" * 100000000
    #     v2 = b"x" * 147483646
    #     df = pd.DataFrame({"strings": [v1] * 20 + [v2] + ["x"] * 20}, dtype="string")
    #     df.to_parquet(tmp_path / "test.parquet")
    #     result = read_parquet(tmp_path / "test.parquet")
    #     assert result["strings"].dtype == "string"


class TestParquetFastParquet(Base):
    def test_basic(self, fp, df_full):
        df = df_full

        dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
        dti = dti._with_freq(None)  # freq doesn't round-trip
        df["datetime_tz"] = dti
        df["timedelta"] = pd.timedelta_range("1 day", periods=3)
        check_round_trip(df, fp)

    def test_columns_dtypes_invalid(self, fp):
        df = pd.DataFrame({"string": list("abc"), "int": list(range(1, 4))})

        err = TypeError
        msg = "Column name must be a string"

        # numeric
        df.columns = [0, 1]
        self.check_error_on_write(df, fp, err, msg)

        # bytes
        df.columns = [b"foo", b"bar"]
        self.check_error_on_write(df, fp, err, msg)

        # python object
        df.columns = [
            datetime.datetime(2011, 1, 1, 0, 0),
            datetime.datetime(2011, 1, 1, 1, 1),
        ]
        self.check_error_on_write(df, fp, err, msg)

    def test_duplicate_columns(self, fp):
        # not currently able to handle duplicate columns
        df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list("aaa")).copy()
        msg = "Cannot create parquet dataset with duplicate column names"
        self.check_error_on_write(df, fp, ValueError, msg)

    def test_bool_with_none(self, fp):
        df = pd.DataFrame({"a": [True, None, False]})
        expected = pd.DataFrame({"a": [1.0, np.nan, 0.0]}, dtype="float16")
        # Fastparquet bug in 0.7.1 makes it so that this dtype becomes
        # float64
        check_round_trip(df, fp, expected=expected, check_dtype=False)

    def test_unsupported(self, fp):
        # period
        df = pd.DataFrame({"a": pd.period_range("2013", freq="M", periods=3)})
        # error from fastparquet -> don't check exact error message
        self.check_error_on_write(df, fp, ValueError, None)

        # mixed
        df = pd.DataFrame({"a": ["a", 1, 2.0]})
        msg = "Can't infer object conversion type"
        self.check_error_on_write(df, fp, ValueError, msg)

    def test_categorical(self, fp):
        df = pd.DataFrame({"a": pd.Categorical(list("abc"))})
        check_round_trip(df, fp)

    def test_filter_row_groups(self, fp):
        d = {"a": list(range(3))}
        df = pd.DataFrame(d)
        with tm.ensure_clean() as path:
            df.to_parquet(path, engine=fp, compression=None, row_group_offsets=1)
            result = read_parquet(path, fp, filters=[("a", "==", 0)])
        assert len(result) == 1

    @pytest.mark.single_cpu
    def test_s3_roundtrip(self, df_compat, s3_public_bucket, fp, s3so):
        # GH #19134
        check_round_trip(
            df_compat,
            fp,
            path=f"s3://{s3_public_bucket.name}/fastparquet.parquet",
            read_kwargs={"storage_options": s3so},
            write_kwargs={"compression": None, "storage_options": s3so},
        )

    def test_partition_cols_supported(self, tmp_path, fp, df_full):
        # GH #23283
        partition_cols = ["bool", "int"]
        df = df_full
        df.to_parquet(
            tmp_path,
            engine="fastparquet",
            partition_cols=partition_cols,
            compression=None,
        )
        assert os.path.exists(tmp_path)
        import fastparquet

        actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats
        assert len(actual_partition_cols) == 2

    def test_partition_cols_string(self, tmp_path, fp, df_full):
        # GH #27117
        partition_cols = "bool"
        df = df_full
        df.to_parquet(
            tmp_path,
            engine="fastparquet",
            partition_cols=partition_cols,
            compression=None,
        )
        assert os.path.exists(tmp_path)
        import fastparquet

        actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats
        assert len(actual_partition_cols) == 1

    def test_partition_on_supported(self, tmp_path, fp, df_full):
        # GH #23283
        partition_cols = ["bool", "int"]
        df = df_full
        df.to_parquet(
            tmp_path,
            engine="fastparquet",
            compression=None,
            partition_on=partition_cols,
        )
        assert os.path.exists(tmp_path)
        import fastparquet

        actual_partition_cols = fastparquet.ParquetFile(str(tmp_path), False).cats
        assert len(actual_partition_cols) == 2

    def test_error_on_using_partition_cols_and_partition_on(
        self, tmp_path, fp, df_full
    ):
        # GH #23283
        partition_cols = ["bool", "int"]
        df = df_full
        msg = (
            "Cannot use both partition_on and partition_cols. Use partition_cols for "
            "partitioning data"
        )
        with pytest.raises(ValueError, match=msg):
            df.to_parquet(
                tmp_path,
                engine="fastparquet",
                compression=None,
                partition_on=partition_cols,
                partition_cols=partition_cols,
            )

    @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index")
    def test_empty_dataframe(self, fp):
        # GH #27339
        df = pd.DataFrame()
        expected = df.copy()
        check_round_trip(df, fp, expected=expected)

    @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index")
    def test_timezone_aware_index(self, fp, timezone_aware_date_list):
        idx = 5 * [timezone_aware_date_list]

        df = pd.DataFrame(index=idx, data={"index_as_col": idx})

        expected = df.copy()
        expected.index.name = "index"
        check_round_trip(df, fp, expected=expected)

    def test_use_nullable_dtypes_not_supported(self, fp):
        df = pd.DataFrame({"a": [1, 2]})

        with tm.ensure_clean() as path:
            df.to_parquet(path)
            with pytest.raises(ValueError, match="not supported for the fastparquet"):
                with tm.assert_produces_warning(FutureWarning):
                    read_parquet(path, engine="fastparquet", use_nullable_dtypes=True)
            with pytest.raises(ValueError, match="not supported for the fastparquet"):
                read_parquet(path, engine="fastparquet", dtype_backend="pyarrow")

    def test_close_file_handle_on_read_error(self):
        with tm.ensure_clean("test.parquet") as path:
            pathlib.Path(path).write_bytes(b"breakit")
            with pytest.raises(Exception, match=""):  # Not important which exception
                read_parquet(path, engine="fastparquet")
            # The next line raises an error on Windows if the file is still open
            pathlib.Path(path).unlink(missing_ok=False)

    def test_bytes_file_name(self, engine):
        # GH#48944
        df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
        with tm.ensure_clean("test.parquet") as path:
            with open(path.encode(), "wb") as f:
                df.to_parquet(f)

            result = read_parquet(path, engine=engine)
        tm.assert_frame_equal(result, df)

    def test_filesystem_notimplemented(self):
        pytest.importorskip("fastparquet")
        df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
        with tm.ensure_clean() as path:
            with pytest.raises(
                NotImplementedError, match="filesystem is not implemented"
            ):
                df.to_parquet(path, engine="fastparquet", filesystem="foo")

        with tm.ensure_clean() as path:
            pathlib.Path(path).write_bytes(b"foo")
            with pytest.raises(
                NotImplementedError, match="filesystem is not implemented"
            ):
                read_parquet(path, engine="fastparquet", filesystem="foo")

    def test_invalid_filesystem(self):
        pytest.importorskip("pyarrow")
        df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
        with tm.ensure_clean() as path:
            with pytest.raises(
                ValueError, match="filesystem must be a pyarrow or fsspec FileSystem"
            ):
                df.to_parquet(path, engine="pyarrow", filesystem="foo")

        with tm.ensure_clean() as path:
            pathlib.Path(path).write_bytes(b"foo")
            with pytest.raises(
                ValueError, match="filesystem must be a pyarrow or fsspec FileSystem"
            ):
                read_parquet(path, engine="pyarrow", filesystem="foo")

    def test_unsupported_pa_filesystem_storage_options(self):
        pa_fs = pytest.importorskip("pyarrow.fs")
        df = pd.DataFrame(data={"A": [0, 1], "B": [1, 0]})
        with tm.ensure_clean() as path:
            with pytest.raises(
                NotImplementedError,
                match="storage_options not supported with a pyarrow FileSystem.",
            ):
                df.to_parquet(
                    path,
                    engine="pyarrow",
                    filesystem=pa_fs.LocalFileSystem(),
                    storage_options={"foo": "bar"},
                )

        with tm.ensure_clean() as path:
            pathlib.Path(path).write_bytes(b"foo")
            with pytest.raises(
                NotImplementedError,
                match="storage_options not supported with a pyarrow FileSystem.",
            ):
                read_parquet(
                    path,
                    engine="pyarrow",
                    filesystem=pa_fs.LocalFileSystem(),
                    storage_options={"foo": "bar"},
                )

    def test_invalid_dtype_backend(self, engine):
        msg = (
            "dtype_backend numpy is invalid, only 'numpy_nullable' and "
            "'pyarrow' are allowed."
        )
        df = pd.DataFrame({"int": list(range(1, 4))})
        with tm.ensure_clean("tmp.parquet") as path:
            df.to_parquet(path)
            with pytest.raises(ValueError, match=msg):
                read_parquet(path, dtype_backend="numpy")

    @pytest.mark.skipif(using_copy_on_write(), reason="fastparquet writes into Index")
    def test_empty_columns(self, fp):
        # GH 52034
        df = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name"))
        expected = pd.DataFrame(index=pd.Index(["a", "b", "c"], name="custom name"))
        check_round_trip(df, fp, expected=expected)
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_pickle.py
ADDED
@@ -0,0 +1,652 @@
"""
manage legacy pickle tests

How to add pickle tests:

1. Install pandas version intended to output the pickle.

2. Execute "generate_legacy_storage_files.py" to create the pickle.
$ python generate_legacy_storage_files.py <output_dir> pickle

3. Move the created pickle to "data/legacy_pickle/<version>" directory.
"""
from __future__ import annotations

from array import array
import bz2
import datetime
import functools
from functools import partial
import gzip
import io
import os
from pathlib import Path
import pickle
import shutil
import tarfile
from typing import Any
import uuid
import zipfile

import numpy as np
import pytest

from pandas.compat import (
    get_lzma_file,
    is_platform_little_endian,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.compressors import flatten_buffer
import pandas.util._test_decorators as td

import pandas as pd
from pandas import (
    DataFrame,
    Index,
    Series,
    period_range,
)
import pandas._testing as tm
from pandas.tests.io.generate_legacy_storage_files import create_pickle_data

import pandas.io.common as icom
from pandas.tseries.offsets import (
    Day,
    MonthEnd,
)


# ---------------------
# comparison functions
# ---------------------
def compare_element(result, expected, typ):
    if isinstance(expected, Index):
        tm.assert_index_equal(expected, result)
        return

    if typ.startswith("sp_"):
        tm.assert_equal(result, expected)
    elif typ == "timestamp":
        if expected is pd.NaT:
            assert result is pd.NaT
        else:
            assert result == expected
    else:
        comparator = getattr(tm, f"assert_{typ}_equal", tm.assert_almost_equal)
        comparator(result, expected)
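

# Note (illustrative): compare_element dispatches on typ -- e.g. typ="frame"
# resolves to tm.assert_frame_equal and typ="series" to tm.assert_series_equal --
# and falls back to tm.assert_almost_equal when no matching comparator exists.
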
# ---------------------
# tests
# ---------------------


@pytest.mark.parametrize(
    "data",
    [
        b"123",
        b"123456",
        bytearray(b"123"),
        memoryview(b"123"),
        pickle.PickleBuffer(b"123"),
        array("I", [1, 2, 3]),
        memoryview(b"123456").cast("B", (3, 2)),
        memoryview(b"123456").cast("B", (3, 2))[::2],
        np.arange(12).reshape((3, 4), order="C"),
        np.arange(12).reshape((3, 4), order="F"),
        np.arange(12).reshape((3, 4), order="C")[:, ::2],
    ],
)
def test_flatten_buffer(data):
    result = flatten_buffer(data)
    expected = memoryview(data).tobytes("A")
    assert result == expected
    if isinstance(data, (bytes, bytearray)):
        assert result is data
    elif isinstance(result, memoryview):
        assert result.ndim == 1
        assert result.format == "B"
        assert result.contiguous
        assert result.shape == (result.nbytes,)


def test_pickles(datapath):
    if not is_platform_little_endian():
        pytest.skip("known failure on non-little endian")

    # For loop for compat with --strict-data-files
    for legacy_pickle in Path(__file__).parent.glob("data/legacy_pickle/*/*.p*kl*"):
        legacy_pickle = datapath(legacy_pickle)

        data = pd.read_pickle(legacy_pickle)

        for typ, dv in data.items():
            for dt, result in dv.items():
                expected = data[typ][dt]

                if typ == "series" and dt == "ts":
                    # GH 7748
                    tm.assert_series_equal(result, expected)
                    assert result.index.freq == expected.index.freq
                    assert not result.index.freq.normalize
                    tm.assert_series_equal(result > 0, expected > 0)

                    # GH 9291
                    freq = result.index.freq
                    assert freq + Day(1) == Day(2)

                    res = freq + pd.Timedelta(hours=1)
                    assert isinstance(res, pd.Timedelta)
                    assert res == pd.Timedelta(days=1, hours=1)

                    res = freq + pd.Timedelta(nanoseconds=1)
                    assert isinstance(res, pd.Timedelta)
                    assert res == pd.Timedelta(days=1, nanoseconds=1)
                elif typ == "index" and dt == "period":
                    tm.assert_index_equal(result, expected)
                    assert isinstance(result.freq, MonthEnd)
                    assert result.freq == MonthEnd()
                    assert result.freqstr == "M"
                    tm.assert_index_equal(result.shift(2), expected.shift(2))
                elif typ == "series" and dt in ("dt_tz", "cat"):
                    tm.assert_series_equal(result, expected)
                elif typ == "frame" and dt in (
                    "dt_mixed_tzs",
                    "cat_onecol",
                    "cat_and_float",
                ):
                    tm.assert_frame_equal(result, expected)
                else:
                    compare_element(result, expected, typ)


def python_pickler(obj, path):
    with open(path, "wb") as fh:
        pickle.dump(obj, fh, protocol=-1)


def python_unpickler(path):
    with open(path, "rb") as fh:
        fh.seek(0)
        return pickle.load(fh)


def flatten(data: dict) -> list[tuple[str, Any]]:
    """Flatten create_pickle_data"""
    return [
        (typ, example)
        for typ, examples in data.items()
        for example in examples.values()
    ]
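

# For reference (illustrative): flatten turns the nested mapping produced by
# create_pickle_data, e.g. {"series": {"ts": <Series>, ...}, ...}, into a flat
# list of (typ, example) pairs such as [("series", <Series>), ...], which is
# what the parametrization below iterates over.
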
@pytest.mark.parametrize(
    "pickle_writer",
    [
        pytest.param(python_pickler, id="python"),
        pytest.param(pd.to_pickle, id="pandas_proto_default"),
        pytest.param(
            functools.partial(pd.to_pickle, protocol=pickle.HIGHEST_PROTOCOL),
            id="pandas_proto_highest",
        ),
        pytest.param(functools.partial(pd.to_pickle, protocol=4), id="pandas_proto_4"),
        pytest.param(
            functools.partial(pd.to_pickle, protocol=5),
            id="pandas_proto_5",
        ),
    ],
)
@pytest.mark.parametrize("writer", [pd.to_pickle, python_pickler])
@pytest.mark.parametrize("typ, expected", flatten(create_pickle_data()))
def test_round_trip_current(typ, expected, pickle_writer, writer):
    with tm.ensure_clean() as path:
        # test writing with each pickler
        pickle_writer(expected, path)

        # test reading with each unpickler
        result = pd.read_pickle(path)
        compare_element(result, expected, typ)

        result = python_unpickler(path)
        compare_element(result, expected, typ)

        # and the same for file objects (GH 35679)
        with open(path, mode="wb") as handle:
            writer(expected, path)
            handle.seek(0)  # shouldn't close file handle
        with open(path, mode="rb") as handle:
            result = pd.read_pickle(handle)
            handle.seek(0)  # shouldn't close file handle
        compare_element(result, expected, typ)


def test_pickle_path_pathlib():
    df = DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    result = tm.round_trip_pathlib(df.to_pickle, pd.read_pickle)
    tm.assert_frame_equal(df, result)


def test_pickle_path_localpath():
    df = DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    result = tm.round_trip_localpath(df.to_pickle, pd.read_pickle)
    tm.assert_frame_equal(df, result)


# ---------------------
# test pickle compression
# ---------------------


@pytest.fixture
def get_random_path():
    return f"__{uuid.uuid4()}__.pickle"


class TestCompression:
    _extension_to_compression = icom.extension_to_compression

    def compress_file(self, src_path, dest_path, compression):
        if compression is None:
            shutil.copyfile(src_path, dest_path)
            return

        if compression == "gzip":
            f = gzip.open(dest_path, "w")
        elif compression == "bz2":
            f = bz2.BZ2File(dest_path, "w")
        elif compression == "zip":
            with zipfile.ZipFile(dest_path, "w", compression=zipfile.ZIP_DEFLATED) as f:
                f.write(src_path, os.path.basename(src_path))
        elif compression == "tar":
            with open(src_path, "rb") as fh:
                with tarfile.open(dest_path, mode="w") as tar:
                    tarinfo = tar.gettarinfo(src_path, os.path.basename(src_path))
                    tar.addfile(tarinfo, fh)
        elif compression == "xz":
            f = get_lzma_file()(dest_path, "w")
        elif compression == "zstd":
            f = import_optional_dependency("zstandard").open(dest_path, "wb")
        else:
            msg = f"Unrecognized compression type: {compression}"
            raise ValueError(msg)

        if compression not in ["zip", "tar"]:
            with open(src_path, "rb") as fh:
                with f:
                    f.write(fh.read())
def test_write_explicit(self, compression, get_random_path):
|
287 |
+
base = get_random_path
|
288 |
+
path1 = base + ".compressed"
|
289 |
+
path2 = base + ".raw"
|
290 |
+
|
291 |
+
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
|
292 |
+
df = DataFrame(
|
293 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
294 |
+
columns=Index(list("ABCD"), dtype=object),
|
295 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
296 |
+
)
|
297 |
+
|
298 |
+
# write to compressed file
|
299 |
+
df.to_pickle(p1, compression=compression)
|
300 |
+
|
301 |
+
# decompress
|
302 |
+
with tm.decompress_file(p1, compression=compression) as f:
|
303 |
+
with open(p2, "wb") as fh:
|
304 |
+
fh.write(f.read())
|
305 |
+
|
306 |
+
# read decompressed file
|
307 |
+
df2 = pd.read_pickle(p2, compression=None)
|
308 |
+
|
309 |
+
tm.assert_frame_equal(df, df2)
|
310 |
+
|
311 |
+
@pytest.mark.parametrize("compression", ["", "None", "bad", "7z"])
|
312 |
+
def test_write_explicit_bad(self, compression, get_random_path):
|
313 |
+
with pytest.raises(ValueError, match="Unrecognized compression type"):
|
314 |
+
with tm.ensure_clean(get_random_path) as path:
|
315 |
+
df = DataFrame(
|
316 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
317 |
+
columns=Index(list("ABCD"), dtype=object),
|
318 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
319 |
+
)
|
320 |
+
df.to_pickle(path, compression=compression)
|
321 |
+
|
322 |
+
def test_write_infer(self, compression_ext, get_random_path):
|
323 |
+
base = get_random_path
|
324 |
+
path1 = base + compression_ext
|
325 |
+
path2 = base + ".raw"
|
326 |
+
compression = self._extension_to_compression.get(compression_ext.lower())
|
327 |
+
|
328 |
+
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
|
329 |
+
df = DataFrame(
|
330 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
331 |
+
columns=Index(list("ABCD"), dtype=object),
|
332 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
333 |
+
)
|
334 |
+
|
335 |
+
# write to compressed file by inferred compression method
|
336 |
+
df.to_pickle(p1)
|
337 |
+
|
338 |
+
# decompress
|
339 |
+
with tm.decompress_file(p1, compression=compression) as f:
|
340 |
+
with open(p2, "wb") as fh:
|
341 |
+
fh.write(f.read())
|
342 |
+
|
343 |
+
# read decompressed file
|
344 |
+
df2 = pd.read_pickle(p2, compression=None)
|
345 |
+
|
346 |
+
tm.assert_frame_equal(df, df2)
|
347 |
+
|
348 |
+
def test_read_explicit(self, compression, get_random_path):
|
349 |
+
base = get_random_path
|
350 |
+
path1 = base + ".raw"
|
351 |
+
path2 = base + ".compressed"
|
352 |
+
|
353 |
+
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
|
354 |
+
df = DataFrame(
|
355 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
356 |
+
columns=Index(list("ABCD"), dtype=object),
|
357 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
358 |
+
)
|
359 |
+
|
360 |
+
# write to uncompressed file
|
361 |
+
df.to_pickle(p1, compression=None)
|
362 |
+
|
363 |
+
# compress
|
364 |
+
self.compress_file(p1, p2, compression=compression)
|
365 |
+
|
366 |
+
# read compressed file
|
367 |
+
df2 = pd.read_pickle(p2, compression=compression)
|
368 |
+
tm.assert_frame_equal(df, df2)
|
369 |
+
|
370 |
+
def test_read_infer(self, compression_ext, get_random_path):
|
371 |
+
base = get_random_path
|
372 |
+
path1 = base + ".raw"
|
373 |
+
path2 = base + compression_ext
|
374 |
+
compression = self._extension_to_compression.get(compression_ext.lower())
|
375 |
+
|
376 |
+
with tm.ensure_clean(path1) as p1, tm.ensure_clean(path2) as p2:
|
377 |
+
df = DataFrame(
|
378 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
379 |
+
columns=Index(list("ABCD"), dtype=object),
|
380 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
381 |
+
)
|
382 |
+
|
383 |
+
# write to uncompressed file
|
384 |
+
df.to_pickle(p1, compression=None)
|
385 |
+
|
386 |
+
# compress
|
387 |
+
self.compress_file(p1, p2, compression=compression)
|
388 |
+
|
389 |
+
# read compressed file by inferred compression method
|
390 |
+
df2 = pd.read_pickle(p2)
|
391 |
+
tm.assert_frame_equal(df, df2)
|
392 |
+
|
393 |
+
|
394 |
+
# ---------------------
|
395 |
+
# test pickle compression
|
396 |
+
# ---------------------
|
397 |
+
|
398 |
+
|
399 |
+
class TestProtocol:
|
400 |
+
@pytest.mark.parametrize("protocol", [-1, 0, 1, 2])
|
401 |
+
def test_read(self, protocol, get_random_path):
|
402 |
+
with tm.ensure_clean(get_random_path) as path:
|
403 |
+
df = DataFrame(
|
404 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
405 |
+
columns=Index(list("ABCD"), dtype=object),
|
406 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
407 |
+
)
|
408 |
+
df.to_pickle(path, protocol=protocol)
|
409 |
+
df2 = pd.read_pickle(path)
|
410 |
+
tm.assert_frame_equal(df, df2)
|
411 |
+
|
412 |
+
|
413 |
+
@pytest.mark.parametrize(
|
414 |
+
["pickle_file", "excols"],
|
415 |
+
[
|
416 |
+
("test_py27.pkl", Index(["a", "b", "c"])),
|
417 |
+
(
|
418 |
+
"test_mi_py27.pkl",
|
419 |
+
pd.MultiIndex.from_arrays([["a", "b", "c"], ["A", "B", "C"]]),
|
420 |
+
),
|
421 |
+
],
|
422 |
+
)
|
423 |
+
def test_unicode_decode_error(datapath, pickle_file, excols):
|
424 |
+
# pickle file written with py27, should be readable without raising
|
425 |
+
# UnicodeDecodeError, see GH#28645 and GH#31988
|
426 |
+
path = datapath("io", "data", "pickle", pickle_file)
|
427 |
+
df = pd.read_pickle(path)
|
428 |
+
|
429 |
+
# just test the columns are correct since the values are random
|
430 |
+
tm.assert_index_equal(df.columns, excols)
|
431 |
+
|
432 |
+
|
433 |
+
# ---------------------
|
434 |
+
# tests for buffer I/O
|
435 |
+
# ---------------------
|
436 |
+
|
437 |
+
|
438 |
+
def test_pickle_buffer_roundtrip():
|
439 |
+
with tm.ensure_clean() as path:
|
440 |
+
df = DataFrame(
|
441 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
442 |
+
columns=Index(list("ABCD"), dtype=object),
|
443 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
444 |
+
)
|
445 |
+
with open(path, "wb") as fh:
|
446 |
+
df.to_pickle(fh)
|
447 |
+
with open(path, "rb") as fh:
|
448 |
+
result = pd.read_pickle(fh)
|
449 |
+
tm.assert_frame_equal(df, result)
|
450 |
+
|
451 |
+
|
452 |
+
# ---------------------
|
453 |
+
# tests for URL I/O
|
454 |
+
# ---------------------
|
455 |
+
|
456 |
+
|
457 |
+
@pytest.mark.parametrize(
|
458 |
+
"mockurl", ["http://url.com", "ftp://test.com", "http://gzip.com"]
|
459 |
+
)
|
460 |
+
def test_pickle_generalurl_read(monkeypatch, mockurl):
|
461 |
+
def python_pickler(obj, path):
|
462 |
+
with open(path, "wb") as fh:
|
463 |
+
pickle.dump(obj, fh, protocol=-1)
|
464 |
+
|
465 |
+
class MockReadResponse:
|
466 |
+
def __init__(self, path) -> None:
|
467 |
+
self.file = open(path, "rb")
|
468 |
+
if "gzip" in path:
|
469 |
+
self.headers = {"Content-Encoding": "gzip"}
|
470 |
+
else:
|
471 |
+
self.headers = {"Content-Encoding": ""}
|
472 |
+
|
473 |
+
def __enter__(self):
|
474 |
+
return self
|
475 |
+
|
476 |
+
def __exit__(self, *args):
|
477 |
+
self.close()
|
478 |
+
|
479 |
+
def read(self):
|
480 |
+
return self.file.read()
|
481 |
+
|
482 |
+
def close(self):
|
483 |
+
return self.file.close()
|
484 |
+
|
485 |
+
with tm.ensure_clean() as path:
|
486 |
+
|
487 |
+
def mock_urlopen_read(*args, **kwargs):
|
488 |
+
return MockReadResponse(path)
|
489 |
+
|
490 |
+
df = DataFrame(
|
491 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
492 |
+
columns=Index(list("ABCD"), dtype=object),
|
493 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
494 |
+
)
|
495 |
+
python_pickler(df, path)
|
496 |
+
monkeypatch.setattr("urllib.request.urlopen", mock_urlopen_read)
|
497 |
+
result = pd.read_pickle(mockurl)
|
498 |
+
tm.assert_frame_equal(df, result)
|
499 |
+
|
500 |
+
|
501 |
+
def test_pickle_fsspec_roundtrip():
|
502 |
+
pytest.importorskip("fsspec")
|
503 |
+
with tm.ensure_clean():
|
504 |
+
mockurl = "memory://mockfile"
|
505 |
+
df = DataFrame(
|
506 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
507 |
+
columns=Index(list("ABCD"), dtype=object),
|
508 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
509 |
+
)
|
510 |
+
df.to_pickle(mockurl)
|
511 |
+
result = pd.read_pickle(mockurl)
|
512 |
+
tm.assert_frame_equal(df, result)
|
513 |
+
|
514 |
+
|
515 |
+
class MyTz(datetime.tzinfo):
|
516 |
+
def __init__(self) -> None:
|
517 |
+
pass
|
518 |
+
|
519 |
+
|
520 |
+
def test_read_pickle_with_subclass():
|
521 |
+
# GH 12163
|
522 |
+
expected = Series(dtype=object), MyTz()
|
523 |
+
result = tm.round_trip_pickle(expected)
|
524 |
+
|
525 |
+
tm.assert_series_equal(result[0], expected[0])
|
526 |
+
assert isinstance(result[1], MyTz)
|
527 |
+
|
528 |
+
|
529 |
+
def test_pickle_binary_object_compression(compression):
|
530 |
+
"""
|
531 |
+
Read/write from binary file-objects w/wo compression.
|
532 |
+
|
533 |
+
GH 26237, GH 29054, and GH 29570
|
534 |
+
"""
|
535 |
+
df = DataFrame(
|
536 |
+
1.1 * np.arange(120).reshape((30, 4)),
|
537 |
+
columns=Index(list("ABCD"), dtype=object),
|
538 |
+
index=Index([f"i-{i}" for i in range(30)], dtype=object),
|
539 |
+
)
|
540 |
+
|
541 |
+
# reference for compression
|
542 |
+
with tm.ensure_clean() as path:
|
543 |
+
df.to_pickle(path, compression=compression)
|
544 |
+
reference = Path(path).read_bytes()
|
545 |
+
|
546 |
+
# write
|
547 |
+
buffer = io.BytesIO()
|
548 |
+
df.to_pickle(buffer, compression=compression)
|
549 |
+
buffer.seek(0)
|
550 |
+
|
551 |
+
# gzip and zip safe the filename: cannot compare the compressed content
|
552 |
+
assert buffer.getvalue() == reference or compression in ("gzip", "zip", "tar")
|
553 |
+
|
554 |
+
# read
|
555 |
+
read_df = pd.read_pickle(buffer, compression=compression)
|
556 |
+
buffer.seek(0)
|
557 |
+
tm.assert_frame_equal(df, read_df)
|
558 |
+
|
559 |
+
|
560 |
+
def test_pickle_dataframe_with_multilevel_index(
|
561 |
+
multiindex_year_month_day_dataframe_random_data,
|
562 |
+
multiindex_dataframe_random_data,
|
563 |
+
):
|
564 |
+
ymd = multiindex_year_month_day_dataframe_random_data
|
565 |
+
frame = multiindex_dataframe_random_data
|
566 |
+
|
567 |
+
def _test_roundtrip(frame):
|
568 |
+
unpickled = tm.round_trip_pickle(frame)
|
569 |
+
tm.assert_frame_equal(frame, unpickled)
|
570 |
+
|
571 |
+
_test_roundtrip(frame)
|
572 |
+
_test_roundtrip(frame.T)
|
573 |
+
_test_roundtrip(ymd)
|
574 |
+
_test_roundtrip(ymd.T)
|
575 |
+
|
576 |
+
|
577 |
+
def test_pickle_timeseries_periodindex():
|
578 |
+
# GH#2891
|
579 |
+
prng = period_range("1/1/2011", "1/1/2012", freq="M")
|
580 |
+
ts = Series(np.random.default_rng(2).standard_normal(len(prng)), prng)
|
581 |
+
new_ts = tm.round_trip_pickle(ts)
|
582 |
+
assert new_ts.index.freqstr == "M"
|
583 |
+
|
584 |
+
|
585 |
+
@pytest.mark.parametrize(
|
586 |
+
"name", [777, 777.0, "name", datetime.datetime(2001, 11, 11), (1, 2)]
|
587 |
+
)
|
588 |
+
def test_pickle_preserve_name(name):
|
589 |
+
unpickled = tm.round_trip_pickle(Series(np.arange(10, dtype=np.float64), name=name))
|
590 |
+
assert unpickled.name == name
|
591 |
+
|
592 |
+
|
593 |
+
def test_pickle_datetimes(datetime_series):
|
594 |
+
unp_ts = tm.round_trip_pickle(datetime_series)
|
595 |
+
tm.assert_series_equal(unp_ts, datetime_series)
|
596 |
+
|
597 |
+
|
598 |
+
def test_pickle_strings(string_series):
|
599 |
+
unp_series = tm.round_trip_pickle(string_series)
|
600 |
+
tm.assert_series_equal(unp_series, string_series)
|
601 |
+
|
602 |
+
|
603 |
+
@td.skip_array_manager_invalid_test
|
604 |
+
def test_pickle_preserves_block_ndim():
|
605 |
+
# GH#37631
|
606 |
+
ser = Series(list("abc")).astype("category").iloc[[0]]
|
607 |
+
res = tm.round_trip_pickle(ser)
|
608 |
+
|
609 |
+
assert res._mgr.blocks[0].ndim == 1
|
610 |
+
assert res._mgr.blocks[0].shape == (1,)
|
611 |
+
|
612 |
+
# GH#37631 OP issue was about indexing, underlying problem was pickle
|
613 |
+
tm.assert_series_equal(res[[True]], ser)
|
614 |
+
|
615 |
+
|
616 |
+
@pytest.mark.parametrize("protocol", [pickle.DEFAULT_PROTOCOL, pickle.HIGHEST_PROTOCOL])
|
617 |
+
def test_pickle_big_dataframe_compression(protocol, compression):
|
618 |
+
# GH#39002
|
619 |
+
df = DataFrame(range(100000))
|
620 |
+
result = tm.round_trip_pathlib(
|
621 |
+
partial(df.to_pickle, protocol=protocol, compression=compression),
|
622 |
+
partial(pd.read_pickle, compression=compression),
|
623 |
+
)
|
624 |
+
tm.assert_frame_equal(df, result)
|
625 |
+
|
626 |
+
|
627 |
+
def test_pickle_frame_v124_unpickle_130(datapath):
|
628 |
+
# GH#42345 DataFrame created in 1.2.x, unpickle in 1.3.x
|
629 |
+
path = datapath(
|
630 |
+
Path(__file__).parent,
|
631 |
+
"data",
|
632 |
+
"legacy_pickle",
|
633 |
+
"1.2.4",
|
634 |
+
"empty_frame_v1_2_4-GH#42345.pkl",
|
635 |
+
)
|
636 |
+
with open(path, "rb") as fd:
|
637 |
+
df = pickle.load(fd)
|
638 |
+
|
639 |
+
expected = DataFrame(index=[], columns=[])
|
640 |
+
tm.assert_frame_equal(df, expected)
|
641 |
+
|
642 |
+
|
643 |
+
def test_pickle_pos_args_deprecation():
|
644 |
+
# GH-54229
|
645 |
+
df = DataFrame({"a": [1, 2, 3]})
|
646 |
+
msg = (
|
647 |
+
r"Starting with pandas version 3.0 all arguments of to_pickle except for the "
|
648 |
+
r"argument 'path' will be keyword-only."
|
649 |
+
)
|
650 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
651 |
+
buffer = io.BytesIO()
|
652 |
+
df.to_pickle(buffer, "infer")
|
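The compression tests above rely on to_pickle/read_pickle inferring the codec from the file extension when compression="infer" (the default). A minimal sketch of that behavior; the file name is illustrative:

import pandas as pd

df = pd.DataFrame({"a": range(5)})
df.to_pickle("frame.pkl.gz")             # ".gz" suffix -> gzip inferred
result = pd.read_pickle("frame.pkl.gz")  # same inference applies on read
assert df.equals(result)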
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_s3.py
ADDED
@@ -0,0 +1,43 @@
from io import BytesIO

import pytest

from pandas import read_csv


def test_streaming_s3_objects():
    # GH17135
    # botocore gained iteration support in 1.10.47, can now be used in read_*
    pytest.importorskip("botocore", minversion="1.10.47")
    from botocore.response import StreamingBody

    data = [b"foo,bar,baz\n1,2,3\n4,5,6\n", b"just,the,header\n"]
    for el in data:
        body = StreamingBody(BytesIO(el), content_length=len(el))
        read_csv(body)


@pytest.mark.single_cpu
def test_read_without_creds_from_pub_bucket(s3_public_bucket_with_data, s3so):
    # GH 34626
    pytest.importorskip("s3fs")
    result = read_csv(
        f"s3://{s3_public_bucket_with_data.name}/tips.csv",
        nrows=3,
        storage_options=s3so,
    )
    assert len(result) == 3


@pytest.mark.single_cpu
def test_read_with_creds_from_pub_bucket(s3_public_bucket_with_data, s3so):
    # Ensure we can read from a public bucket with credentials
    # GH 34626
    pytest.importorskip("s3fs")
    df = read_csv(
        f"s3://{s3_public_bucket_with_data.name}/tips.csv",
        nrows=5,
        header=None,
        storage_options=s3so,
    )
    assert len(df) == 5
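For context on the fixtures above: the s3so fixture supplies storage_options for the test buckets, and read_csv forwards storage_options to s3fs. In ordinary use a public object can be read anonymously; a sketch with a placeholder bucket and key:

import pandas as pd

# Bucket and key are placeholders; requires the s3fs package.
df = pd.read_csv(
    "s3://some-public-bucket/tips.csv",
    nrows=3,
    storage_options={"anon": True},  # s3fs option: unsigned, anonymous requests
)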
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_spss.py
ADDED
@@ -0,0 +1,164 @@
import datetime
from pathlib import Path

import numpy as np
import pytest

import pandas as pd
import pandas._testing as tm
from pandas.util.version import Version

pyreadstat = pytest.importorskip("pyreadstat")


# TODO(CoW) - detection of chained assignment in cython
# https://github.com/pandas-dev/pandas/issues/51315
@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError")
@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning")
@pytest.mark.parametrize("path_klass", [lambda p: p, Path])
def test_spss_labelled_num(path_klass, datapath):
    # test file from the Haven project (https://haven.tidyverse.org/)
    # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT
    fname = path_klass(datapath("io", "data", "spss", "labelled-num.sav"))

    df = pd.read_spss(fname, convert_categoricals=True)
    expected = pd.DataFrame({"VAR00002": "This is one"}, index=[0])
    expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
    tm.assert_frame_equal(df, expected)

    df = pd.read_spss(fname, convert_categoricals=False)
    expected = pd.DataFrame({"VAR00002": 1.0}, index=[0])
    tm.assert_frame_equal(df, expected)


@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError")
@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning")
def test_spss_labelled_num_na(datapath):
    # test file from the Haven project (https://haven.tidyverse.org/)
    # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT
    fname = datapath("io", "data", "spss", "labelled-num-na.sav")

    df = pd.read_spss(fname, convert_categoricals=True)
    expected = pd.DataFrame({"VAR00002": ["This is one", None]})
    expected["VAR00002"] = pd.Categorical(expected["VAR00002"])
    tm.assert_frame_equal(df, expected)

    df = pd.read_spss(fname, convert_categoricals=False)
    expected = pd.DataFrame({"VAR00002": [1.0, np.nan]})
    tm.assert_frame_equal(df, expected)


@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError")
@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning")
def test_spss_labelled_str(datapath):
    # test file from the Haven project (https://haven.tidyverse.org/)
    # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT
    fname = datapath("io", "data", "spss", "labelled-str.sav")

    df = pd.read_spss(fname, convert_categoricals=True)
    expected = pd.DataFrame({"gender": ["Male", "Female"]})
    expected["gender"] = pd.Categorical(expected["gender"])
    tm.assert_frame_equal(df, expected)

    df = pd.read_spss(fname, convert_categoricals=False)
    expected = pd.DataFrame({"gender": ["M", "F"]})
    tm.assert_frame_equal(df, expected)


@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError")
@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning")
def test_spss_umlauts(datapath):
    # test file from the Haven project (https://haven.tidyverse.org/)
    # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT
    fname = datapath("io", "data", "spss", "umlauts.sav")

    df = pd.read_spss(fname, convert_categoricals=True)
    expected = pd.DataFrame(
        {"var1": ["the ä umlaut", "the ü umlaut", "the ä umlaut", "the ö umlaut"]}
    )
    expected["var1"] = pd.Categorical(expected["var1"])
    tm.assert_frame_equal(df, expected)

    df = pd.read_spss(fname, convert_categoricals=False)
    expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]})
    tm.assert_frame_equal(df, expected)


def test_spss_usecols(datapath):
    # usecols must be list-like
    fname = datapath("io", "data", "spss", "labelled-num.sav")

    with pytest.raises(TypeError, match="usecols must be list-like."):
        pd.read_spss(fname, usecols="VAR00002")


def test_spss_umlauts_dtype_backend(datapath, dtype_backend):
    # test file from the Haven project (https://haven.tidyverse.org/)
    # Licence at LICENSES/HAVEN_LICENSE, LICENSES/HAVEN_MIT
    fname = datapath("io", "data", "spss", "umlauts.sav")

    df = pd.read_spss(fname, convert_categoricals=False, dtype_backend=dtype_backend)
    expected = pd.DataFrame({"var1": [1.0, 2.0, 1.0, 3.0]}, dtype="Int64")

    if dtype_backend == "pyarrow":
        pa = pytest.importorskip("pyarrow")

        from pandas.arrays import ArrowExtensionArray

        expected = pd.DataFrame(
            {
                col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))
                for col in expected.columns
            }
        )

    tm.assert_frame_equal(df, expected)


def test_invalid_dtype_backend():
    msg = (
        "dtype_backend numpy is invalid, only 'numpy_nullable' and "
        "'pyarrow' are allowed."
    )
    with pytest.raises(ValueError, match=msg):
        pd.read_spss("test", dtype_backend="numpy")


@pytest.mark.filterwarnings("ignore::pandas.errors.ChainedAssignmentError")
@pytest.mark.filterwarnings("ignore:ChainedAssignmentError:FutureWarning")
def test_spss_metadata(datapath):
    # GH 54264
    fname = datapath("io", "data", "spss", "labelled-num.sav")

    df = pd.read_spss(fname)
    metadata = {
        "column_names": ["VAR00002"],
        "column_labels": [None],
        "column_names_to_labels": {"VAR00002": None},
        "file_encoding": "UTF-8",
        "number_columns": 1,
        "number_rows": 1,
        "variable_value_labels": {"VAR00002": {1.0: "This is one"}},
        "value_labels": {"labels0": {1.0: "This is one"}},
        "variable_to_label": {"VAR00002": "labels0"},
        "notes": [],
        "original_variable_types": {"VAR00002": "F8.0"},
        "readstat_variable_types": {"VAR00002": "double"},
        "table_name": None,
        "missing_ranges": {},
        "missing_user_values": {},
        "variable_storage_width": {"VAR00002": 8},
        "variable_display_width": {"VAR00002": 8},
        "variable_alignment": {"VAR00002": "unknown"},
        "variable_measure": {"VAR00002": "unknown"},
        "file_label": None,
        "file_format": "sav/zsav",
    }
    if Version(pyreadstat.__version__) >= Version("1.2.4"):
        metadata.update(
            {
                "creation_time": datetime.datetime(2015, 2, 6, 14, 33, 36),
                "modification_time": datetime.datetime(2015, 2, 6, 14, 33, 36),
            }
        )
    assert df.attrs == metadata
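A short usage sketch of the read_spss options exercised above (the file name is a placeholder): convert_categoricals maps labelled values to Categorical columns, and dtype_backend selects nullable-NumPy or Arrow-backed dtypes.

import pandas as pd

# Labelled values become Categorical columns (the default behavior).
df = pd.read_spss("survey.sav", convert_categoricals=True)

# Raw numeric codes instead, backed by nullable dtypes.
codes = pd.read_spss(
    "survey.sav", convert_categoricals=False, dtype_backend="numpy_nullable"
)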
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_sql.py
ADDED
The diff for this file is too large to render.
See raw diff
llmeval-env/lib/python3.10/site-packages/pandas/tests/io/test_stata.py
ADDED
@@ -0,0 +1,2381 @@
import bz2
import datetime as dt
from datetime import datetime
import gzip
import io
import os
import struct
import tarfile
import zipfile

import numpy as np
import pytest

import pandas.util._test_decorators as td

import pandas as pd
from pandas import CategoricalDtype
import pandas._testing as tm
from pandas.core.frame import (
    DataFrame,
    Series,
)

from pandas.io.parsers import read_csv
from pandas.io.stata import (
    CategoricalConversionWarning,
    InvalidColumnName,
    PossiblePrecisionLoss,
    StataMissingValue,
    StataReader,
    StataWriter,
    StataWriterUTF8,
    ValueLabelTypeMismatch,
    read_stata,
)


@pytest.fixture
def mixed_frame():
    return DataFrame(
        {
            "a": [1, 2, 3, 4],
            "b": [1.0, 3.0, 27.0, 81.0],
            "c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
        }
    )


@pytest.fixture
def parsed_114(datapath):
    dta14_114 = datapath("io", "data", "stata", "stata5_114.dta")
    parsed_114 = read_stata(dta14_114, convert_dates=True)
    parsed_114.index.name = "index"
    return parsed_114


class TestStata:
    def read_dta(self, file):
        # Legacy default reader configuration
        return read_stata(file, convert_dates=True)

    def read_csv(self, file):
        return read_csv(file, parse_dates=True)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_read_empty_dta(self, version):
        empty_ds = DataFrame(columns=["unit"])
        # GH 7369, make sure can read a 0-obs dta file
        with tm.ensure_clean() as path:
            empty_ds.to_stata(path, write_index=False, version=version)
            empty_ds2 = read_stata(path)
            tm.assert_frame_equal(empty_ds, empty_ds2)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_read_empty_dta_with_dtypes(self, version):
        # GH 46240
        # Fixing above bug revealed that types are not correctly preserved when
        # writing empty DataFrames
        empty_df_typed = DataFrame(
            {
                "i8": np.array([0], dtype=np.int8),
                "i16": np.array([0], dtype=np.int16),
                "i32": np.array([0], dtype=np.int32),
                "i64": np.array([0], dtype=np.int64),
                "u8": np.array([0], dtype=np.uint8),
                "u16": np.array([0], dtype=np.uint16),
                "u32": np.array([0], dtype=np.uint32),
                "u64": np.array([0], dtype=np.uint64),
                "f32": np.array([0], dtype=np.float32),
                "f64": np.array([0], dtype=np.float64),
            }
        )
        expected = empty_df_typed.copy()
        # No uint# support. Downcast since values in range for int#
        expected["u8"] = expected["u8"].astype(np.int8)
        expected["u16"] = expected["u16"].astype(np.int16)
        expected["u32"] = expected["u32"].astype(np.int32)
        # No int64 supported at all. Downcast since values in range for int32
        expected["u64"] = expected["u64"].astype(np.int32)
        expected["i64"] = expected["i64"].astype(np.int32)

        # GH 7369, make sure can read a 0-obs dta file
        with tm.ensure_clean() as path:
            empty_df_typed.to_stata(path, write_index=False, version=version)
            empty_reread = read_stata(path)
            tm.assert_frame_equal(expected, empty_reread)
            tm.assert_series_equal(expected.dtypes, empty_reread.dtypes)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_read_index_col_none(self, version):
        df = DataFrame({"a": range(5), "b": ["b1", "b2", "b3", "b4", "b5"]})
        # GH 7369, make sure can read a 0-obs dta file
        with tm.ensure_clean() as path:
            df.to_stata(path, write_index=False, version=version)
            read_df = read_stata(path)

        assert isinstance(read_df.index, pd.RangeIndex)
        expected = df.copy()
        expected["a"] = expected["a"].astype(np.int32)
        tm.assert_frame_equal(read_df, expected, check_index_type=True)

    @pytest.mark.parametrize("file", ["stata1_114", "stata1_117"])
    def test_read_dta1(self, file, datapath):
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = self.read_dta(file)

        # Pandas uses np.nan as missing value.
        # Thus, all columns will be of type float, regardless of their name.
        expected = DataFrame(
            [(np.nan, np.nan, np.nan, np.nan, np.nan)],
            columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
        )

        # this is an oddity as really the nan should be float64, but
        # the casting doesn't fail so need to match stata here
        expected["float_miss"] = expected["float_miss"].astype(np.float32)

        tm.assert_frame_equal(parsed, expected)

    def test_read_dta2(self, datapath):
        expected = DataFrame.from_records(
            [
                (
                    datetime(2006, 11, 19, 23, 13, 20),
                    1479596223000,
                    datetime(2010, 1, 20),
                    datetime(2010, 1, 8),
                    datetime(2010, 1, 1),
                    datetime(1974, 7, 1),
                    datetime(2010, 1, 1),
                    datetime(2010, 1, 1),
                ),
                (
                    datetime(1959, 12, 31, 20, 3, 20),
                    -1479590,
                    datetime(1953, 10, 2),
                    datetime(1948, 6, 10),
                    datetime(1955, 1, 1),
                    datetime(1955, 7, 1),
                    datetime(1955, 1, 1),
                    datetime(2, 1, 1),
                ),
                (pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT),
            ],
            columns=[
                "datetime_c",
                "datetime_big_c",
                "date",
                "weekly_date",
                "monthly_date",
                "quarterly_date",
                "half_yearly_date",
                "yearly_date",
            ],
        )
        expected["yearly_date"] = expected["yearly_date"].astype("O")

        path1 = datapath("io", "data", "stata", "stata2_114.dta")
        path2 = datapath("io", "data", "stata", "stata2_115.dta")
        path3 = datapath("io", "data", "stata", "stata2_117.dta")

        with tm.assert_produces_warning(UserWarning):
            parsed_114 = self.read_dta(path1)
        with tm.assert_produces_warning(UserWarning):
            parsed_115 = self.read_dta(path2)
        with tm.assert_produces_warning(UserWarning):
            parsed_117 = self.read_dta(path3)
        # FIXME: don't leave commented-out
        # 113 is buggy due to limits of date format support in Stata
        # parsed_113 = self.read_dta(
        #     datapath("io", "data", "stata", "stata2_113.dta")
        # )

        # FIXME: don't leave commented-out
        # buggy test because of the NaT comparison on certain platforms
        # Format 113 test fails since it does not support tc and tC formats
        # tm.assert_frame_equal(parsed_113, expected)
        tm.assert_frame_equal(parsed_114, expected, check_datetimelike_compat=True)
        tm.assert_frame_equal(parsed_115, expected, check_datetimelike_compat=True)
        tm.assert_frame_equal(parsed_117, expected, check_datetimelike_compat=True)

    @pytest.mark.parametrize(
        "file", ["stata3_113", "stata3_114", "stata3_115", "stata3_117"]
    )
    def test_read_dta3(self, file, datapath):
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = self.read_dta(file)

        # match stata here
        expected = self.read_csv(datapath("io", "data", "stata", "stata3.csv"))
        expected = expected.astype(np.float32)
        expected["year"] = expected["year"].astype(np.int16)
        expected["quarter"] = expected["quarter"].astype(np.int8)

        tm.assert_frame_equal(parsed, expected)

    @pytest.mark.parametrize(
        "file", ["stata4_113", "stata4_114", "stata4_115", "stata4_117"]
    )
    def test_read_dta4(self, file, datapath):
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = self.read_dta(file)

        expected = DataFrame.from_records(
            [
                ["one", "ten", "one", "one", "one"],
                ["two", "nine", "two", "two", "two"],
                ["three", "eight", "three", "three", "three"],
                ["four", "seven", 4, "four", "four"],
                ["five", "six", 5, np.nan, "five"],
                ["six", "five", 6, np.nan, "six"],
                ["seven", "four", 7, np.nan, "seven"],
                ["eight", "three", 8, np.nan, "eight"],
                ["nine", "two", 9, np.nan, "nine"],
                ["ten", "one", "ten", np.nan, "ten"],
            ],
            columns=[
                "fully_labeled",
                "fully_labeled2",
                "incompletely_labeled",
                "labeled_with_missings",
                "float_labelled",
            ],
        )

        # these are all categoricals
        for col in expected:
            orig = expected[col].copy()

            categories = np.asarray(expected["fully_labeled"][orig.notna()])
            if col == "incompletely_labeled":
                categories = orig

            cat = orig.astype("category")._values
            cat = cat.set_categories(categories, ordered=True)
            cat.categories.rename(None, inplace=True)

            expected[col] = cat

        # stata doesn't save .category metadata
        tm.assert_frame_equal(parsed, expected)

    # File containing strls
    def test_read_dta12(self, datapath):
        parsed_117 = self.read_dta(datapath("io", "data", "stata", "stata12_117.dta"))
        expected = DataFrame.from_records(
            [
                [1, "abc", "abcdefghi"],
                [3, "cba", "qwertywertyqwerty"],
                [93, "", "strl"],
            ],
            columns=["x", "y", "z"],
        )

        tm.assert_frame_equal(parsed_117, expected, check_dtype=False)

    def test_read_dta18(self, datapath):
        parsed_118 = self.read_dta(datapath("io", "data", "stata", "stata14_118.dta"))
        parsed_118["Bytes"] = parsed_118["Bytes"].astype("O")
        expected = DataFrame.from_records(
            [
                ["Cat", "Bogota", "Bogotá", 1, 1.0, "option b Ünicode", 1.0],
                ["Dog", "Boston", "Uzunköprü", np.nan, np.nan, np.nan, np.nan],
                ["Plane", "Rome", "Tromsø", 0, 0.0, "option a", 0.0],
                ["Potato", "Tokyo", "Elâzığ", -4, 4.0, 4, 4],  # noqa: RUF001
                ["", "", "", 0, 0.3332999, "option a", 1 / 3.0],
            ],
            columns=[
                "Things",
                "Cities",
                "Unicode_Cities_Strl",
                "Ints",
                "Floats",
                "Bytes",
                "Longs",
            ],
        )
        expected["Floats"] = expected["Floats"].astype(np.float32)
        for col in parsed_118.columns:
            tm.assert_almost_equal(parsed_118[col], expected[col])

        with StataReader(datapath("io", "data", "stata", "stata14_118.dta")) as rdr:
            vl = rdr.variable_labels()
            vl_expected = {
                "Unicode_Cities_Strl": "Here are some strls with Ünicode chars",
                "Longs": "long data",
                "Things": "Here are some things",
                "Bytes": "byte data",
                "Ints": "int data",
                "Cities": "Here are some cities",
                "Floats": "float data",
            }
            tm.assert_dict_equal(vl, vl_expected)

            assert rdr.data_label == "This is a Ünicode data label"

    def test_read_write_dta5(self):
        original = DataFrame(
            [(np.nan, np.nan, np.nan, np.nan, np.nan)],
            columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
        )
        original.index.name = "index"

        with tm.ensure_clean() as path:
            original.to_stata(path, convert_dates=None)
            written_and_read_again = self.read_dta(path)

        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    def test_write_dta6(self, datapath):
        original = self.read_csv(datapath("io", "data", "stata", "stata3.csv"))
        original.index.name = "index"
        original.index = original.index.astype(np.int32)
        original["year"] = original["year"].astype(np.int32)
        original["quarter"] = original["quarter"].astype(np.int32)

        with tm.ensure_clean() as path:
            original.to_stata(path, convert_dates=None)
            written_and_read_again = self.read_dta(path)
            tm.assert_frame_equal(
                written_and_read_again.set_index("index"),
                original,
                check_index_type=False,
            )

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_read_write_dta10(self, version):
        original = DataFrame(
            data=[["string", "object", 1, 1.1, np.datetime64("2003-12-25")]],
            columns=["string", "object", "integer", "floating", "datetime"],
        )
        original["object"] = Series(original["object"], dtype=object)
        original.index.name = "index"
        original.index = original.index.astype(np.int32)
        original["integer"] = original["integer"].astype(np.int32)

        with tm.ensure_clean() as path:
            original.to_stata(path, convert_dates={"datetime": "tc"}, version=version)
            written_and_read_again = self.read_dta(path)
            # original.index is np.int32, read index is np.int64
            tm.assert_frame_equal(
                written_and_read_again.set_index("index"),
                original,
                check_index_type=False,
            )
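As a reference for the convert_dates mappings used throughout these tests: the dict maps column names to Stata internal date formats ("tc" for millisecond datetimes, "td" for days). A minimal sketch with a throwaway frame and file name:

import pandas as pd

df = pd.DataFrame({"when": pd.to_datetime(["2003-12-25"]), "x": [1]})
df.to_stata("example.dta", convert_dates={"when": "tc"}, write_index=False)
back = pd.read_stata("example.dta")  # "when" round-trips as a datetime column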
369 |
+
def test_stata_doc_examples(self):
|
370 |
+
with tm.ensure_clean() as path:
|
371 |
+
df = DataFrame(
|
372 |
+
np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB")
|
373 |
+
)
|
374 |
+
df.to_stata(path)
|
375 |
+
|
376 |
+
def test_write_preserves_original(self):
|
377 |
+
# 9795
|
378 |
+
|
379 |
+
df = DataFrame(
|
380 |
+
np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd")
|
381 |
+
)
|
382 |
+
df.loc[2, "a":"c"] = np.nan
|
383 |
+
df_copy = df.copy()
|
384 |
+
with tm.ensure_clean() as path:
|
385 |
+
df.to_stata(path, write_index=False)
|
386 |
+
tm.assert_frame_equal(df, df_copy)
|
387 |
+
|
388 |
+
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
|
389 |
+
def test_encoding(self, version, datapath):
|
390 |
+
# GH 4626, proper encoding handling
|
391 |
+
raw = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta"))
|
392 |
+
encoded = read_stata(datapath("io", "data", "stata", "stata1_encoding.dta"))
|
393 |
+
result = encoded.kreis1849[0]
|
394 |
+
|
395 |
+
expected = raw.kreis1849[0]
|
396 |
+
assert result == expected
|
397 |
+
assert isinstance(result, str)
|
398 |
+
|
399 |
+
with tm.ensure_clean() as path:
|
400 |
+
encoded.to_stata(path, write_index=False, version=version)
|
401 |
+
reread_encoded = read_stata(path)
|
402 |
+
tm.assert_frame_equal(encoded, reread_encoded)
|
403 |
+
|
404 |
+
def test_read_write_dta11(self):
|
405 |
+
original = DataFrame(
|
406 |
+
[(1, 2, 3, 4)],
|
407 |
+
columns=[
|
408 |
+
"good",
|
409 |
+
"b\u00E4d",
|
410 |
+
"8number",
|
411 |
+
"astringwithmorethan32characters______",
|
412 |
+
],
|
413 |
+
)
|
414 |
+
formatted = DataFrame(
|
415 |
+
[(1, 2, 3, 4)],
|
416 |
+
columns=["good", "b_d", "_8number", "astringwithmorethan32characters_"],
|
417 |
+
)
|
418 |
+
formatted.index.name = "index"
|
419 |
+
formatted = formatted.astype(np.int32)
|
420 |
+
|
421 |
+
with tm.ensure_clean() as path:
|
422 |
+
with tm.assert_produces_warning(InvalidColumnName):
|
423 |
+
original.to_stata(path, convert_dates=None)
|
424 |
+
|
425 |
+
written_and_read_again = self.read_dta(path)
|
426 |
+
|
427 |
+
expected = formatted.copy()
|
428 |
+
expected.index = expected.index.astype(np.int32)
|
429 |
+
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
|
430 |
+
|
431 |
+
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
|
432 |
+
def test_read_write_dta12(self, version):
|
433 |
+
original = DataFrame(
|
434 |
+
[(1, 2, 3, 4, 5, 6)],
|
435 |
+
columns=[
|
436 |
+
"astringwithmorethan32characters_1",
|
437 |
+
"astringwithmorethan32characters_2",
|
438 |
+
"+",
|
439 |
+
"-",
|
440 |
+
"short",
|
441 |
+
"delete",
|
442 |
+
],
|
443 |
+
)
|
444 |
+
formatted = DataFrame(
|
445 |
+
[(1, 2, 3, 4, 5, 6)],
|
446 |
+
columns=[
|
447 |
+
"astringwithmorethan32characters_",
|
448 |
+
"_0astringwithmorethan32character",
|
449 |
+
"_",
|
450 |
+
"_1_",
|
451 |
+
"_short",
|
452 |
+
"_delete",
|
453 |
+
],
|
454 |
+
)
|
455 |
+
formatted.index.name = "index"
|
456 |
+
formatted = formatted.astype(np.int32)
|
457 |
+
|
458 |
+
with tm.ensure_clean() as path:
|
459 |
+
with tm.assert_produces_warning(InvalidColumnName):
|
460 |
+
original.to_stata(path, convert_dates=None, version=version)
|
461 |
+
# should get a warning for that format.
|
462 |
+
|
463 |
+
written_and_read_again = self.read_dta(path)
|
464 |
+
|
465 |
+
expected = formatted.copy()
|
466 |
+
expected.index = expected.index.astype(np.int32)
|
467 |
+
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
|
468 |
+
|
469 |
+
def test_read_write_dta13(self):
|
470 |
+
s1 = Series(2**9, dtype=np.int16)
|
471 |
+
s2 = Series(2**17, dtype=np.int32)
|
472 |
+
s3 = Series(2**33, dtype=np.int64)
|
473 |
+
original = DataFrame({"int16": s1, "int32": s2, "int64": s3})
|
474 |
+
original.index.name = "index"
|
475 |
+
|
476 |
+
formatted = original
|
477 |
+
formatted["int64"] = formatted["int64"].astype(np.float64)
|
478 |
+
|
479 |
+
with tm.ensure_clean() as path:
|
480 |
+
original.to_stata(path)
|
481 |
+
written_and_read_again = self.read_dta(path)
|
482 |
+
|
483 |
+
expected = formatted.copy()
|
484 |
+
expected.index = expected.index.astype(np.int32)
|
485 |
+
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
|
486 |
+
|
487 |
+
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
|
488 |
+
@pytest.mark.parametrize(
|
489 |
+
"file", ["stata5_113", "stata5_114", "stata5_115", "stata5_117"]
|
490 |
+
)
|
491 |
+
def test_read_write_reread_dta14(self, file, parsed_114, version, datapath):
|
492 |
+
file = datapath("io", "data", "stata", f"{file}.dta")
|
493 |
+
parsed = self.read_dta(file)
|
494 |
+
parsed.index.name = "index"
|
495 |
+
|
496 |
+
tm.assert_frame_equal(parsed_114, parsed)
|
497 |
+
|
498 |
+
with tm.ensure_clean() as path:
|
499 |
+
parsed_114.to_stata(path, convert_dates={"date_td": "td"}, version=version)
|
500 |
+
written_and_read_again = self.read_dta(path)
|
501 |
+
|
502 |
+
expected = parsed_114.copy()
|
503 |
+
expected.index = expected.index.astype(np.int32)
|
504 |
+
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
|
505 |
+
|
506 |
+
@pytest.mark.parametrize(
|
507 |
+
"file", ["stata6_113", "stata6_114", "stata6_115", "stata6_117"]
|
508 |
+
)
|
509 |
+
def test_read_write_reread_dta15(self, file, datapath):
|
510 |
+
expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
|
511 |
+
expected["byte_"] = expected["byte_"].astype(np.int8)
|
512 |
+
expected["int_"] = expected["int_"].astype(np.int16)
|
513 |
+
expected["long_"] = expected["long_"].astype(np.int32)
|
514 |
+
expected["float_"] = expected["float_"].astype(np.float32)
|
515 |
+
expected["double_"] = expected["double_"].astype(np.float64)
|
516 |
+
expected["date_td"] = expected["date_td"].apply(
|
517 |
+
datetime.strptime, args=("%Y-%m-%d",)
|
518 |
+
)
|
519 |
+
|
520 |
+
file = datapath("io", "data", "stata", f"{file}.dta")
|
521 |
+
parsed = self.read_dta(file)
|
522 |
+
|
523 |
+
tm.assert_frame_equal(expected, parsed)
|
524 |
+
|
525 |
+
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
|
526 |
+
def test_timestamp_and_label(self, version):
|
527 |
+
original = DataFrame([(1,)], columns=["variable"])
|
528 |
+
time_stamp = datetime(2000, 2, 29, 14, 21)
|
529 |
+
data_label = "This is a data file."
|
530 |
+
with tm.ensure_clean() as path:
|
531 |
+
original.to_stata(
|
532 |
+
path, time_stamp=time_stamp, data_label=data_label, version=version
|
533 |
+
)
|
534 |
+
|
535 |
+
with StataReader(path) as reader:
|
536 |
+
assert reader.time_stamp == "29 Feb 2000 14:21"
|
537 |
+
assert reader.data_label == data_label
|
538 |
+
|
539 |
+
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
|
540 |
+
def test_invalid_timestamp(self, version):
|
541 |
+
original = DataFrame([(1,)], columns=["variable"])
|
542 |
+
time_stamp = "01 Jan 2000, 00:00:00"
|
543 |
+
with tm.ensure_clean() as path:
|
544 |
+
msg = "time_stamp should be datetime type"
|
545 |
+
with pytest.raises(ValueError, match=msg):
|
546 |
+
original.to_stata(path, time_stamp=time_stamp, version=version)
|
547 |
+
assert not os.path.isfile(path)
|
548 |
+
|
549 |
+
    def test_numeric_column_names(self):
        original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
        original.index.name = "index"
        with tm.ensure_clean() as path:
            # should get a warning for that format.
            with tm.assert_produces_warning(InvalidColumnName):
                original.to_stata(path)

            written_and_read_again = self.read_dta(path)

        written_and_read_again = written_and_read_again.set_index("index")
        columns = list(written_and_read_again.columns)
        convert_col_name = lambda x: int(x[1])
        written_and_read_again.columns = map(convert_col_name, columns)

        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(expected, written_and_read_again)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_nan_to_missing_value(self, version):
        s1 = Series(np.arange(4.0), dtype=np.float32)
        s2 = Series(np.arange(4.0), dtype=np.float64)
        s1[::2] = np.nan
        s2[1::2] = np.nan
        original = DataFrame({"s1": s1, "s2": s2})
        original.index.name = "index"

        with tm.ensure_clean() as path:
            original.to_stata(path, version=version)
            written_and_read_again = self.read_dta(path)

        written_and_read_again = written_and_read_again.set_index("index")
        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again, expected)

    def test_no_index(self):
        columns = ["x", "y"]
        original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), columns=columns)
        original.index.name = "index_not_written"
        with tm.ensure_clean() as path:
            original.to_stata(path, write_index=False)
            written_and_read_again = self.read_dta(path)
            with pytest.raises(KeyError, match=original.index.name):
                written_and_read_again["index_not_written"]

    def test_string_no_dates(self):
        s1 = Series(["a", "A longer string"])
        s2 = Series([1.0, 2.0], dtype=np.float64)
        original = DataFrame({"s1": s1, "s2": s2})
        original.index.name = "index"
        with tm.ensure_clean() as path:
            original.to_stata(path)
            written_and_read_again = self.read_dta(path)

        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    def test_large_value_conversion(self):
        s0 = Series([1, 99], dtype=np.int8)
        s1 = Series([1, 127], dtype=np.int8)
        s2 = Series([1, 2**15 - 1], dtype=np.int16)
        s3 = Series([1, 2**63 - 1], dtype=np.int64)
        original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3})
        original.index.name = "index"
        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(PossiblePrecisionLoss):
                original.to_stata(path)

            written_and_read_again = self.read_dta(path)

        modified = original.copy()
        modified["s1"] = Series(modified["s1"], dtype=np.int16)
        modified["s2"] = Series(modified["s2"], dtype=np.int32)
        modified["s3"] = Series(modified["s3"], dtype=np.float64)
        modified.index = original.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)

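    # test_large_value_conversion documents the writer's upcast ladder for
    # values at the top of their type's range: int8 -> int16, int16 -> int32,
    # and int64 -> float64 (Stata has no 8-byte integer type), with
    # PossiblePrecisionLoss flagging that final, lossy step.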
    def test_dates_invalid_column(self):
        original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
        original.index.name = "index"
        with tm.ensure_clean() as path:
            with tm.assert_produces_warning(InvalidColumnName):
                original.to_stata(path, convert_dates={0: "tc"})

            written_and_read_again = self.read_dta(path)

        modified = original.copy()
        modified.columns = ["_0"]
        modified.index = original.index.astype(np.int32)
        tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)

    def test_105(self, datapath):
        # Data obtained from:
        # http://go.worldbank.org/ZXY29PVJ21
        dpath = datapath("io", "data", "stata", "S4_EDUC1.dta")
        df = read_stata(dpath)
        df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
        df0 = DataFrame(df0)
        df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
        df0["clustnum"] = df0["clustnum"].astype(np.int16)
        df0["pri_schl"] = df0["pri_schl"].astype(np.int8)
        df0["psch_num"] = df0["psch_num"].astype(np.int8)
        df0["psch_dis"] = df0["psch_dis"].astype(np.float32)
        tm.assert_frame_equal(df.head(3), df0)

    def test_value_labels_old_format(self, datapath):
        # GH 19417
        #
        # Test that value_labels() returns an empty dict if the file format
        # predates supporting value labels.
        dpath = datapath("io", "data", "stata", "S4_EDUC1.dta")
        with StataReader(dpath) as reader:
            assert reader.value_labels() == {}

    def test_date_export_formats(self):
        columns = ["tc", "td", "tw", "tm", "tq", "th", "ty"]
        conversions = {c: c for c in columns}
        data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
        original = DataFrame([data], columns=columns)
        original.index.name = "index"
        expected_values = [
            datetime(2006, 11, 20, 23, 13, 20),  # Time
            datetime(2006, 11, 20),  # Day
            datetime(2006, 11, 19),  # Week
            datetime(2006, 11, 1),  # Month
            datetime(2006, 10, 1),  # Quarter year
            datetime(2006, 7, 1),  # Half year
            datetime(2006, 1, 1),  # Year
        ]

        expected = DataFrame(
            [expected_values],
            index=pd.Index([0], dtype=np.int32, name="index"),
            columns=columns,
        )

        with tm.ensure_clean() as path:
            original.to_stata(path, convert_dates=conversions)
            written_and_read_again = self.read_dta(path)

        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    def test_write_missing_strings(self):
        original = DataFrame([["1"], [None]], columns=["foo"])

        expected = DataFrame(
            [["1"], [""]],
            index=pd.Index([0, 1], dtype=np.int32, name="index"),
            columns=["foo"],
        )

        with tm.ensure_clean() as path:
            original.to_stata(path)
            written_and_read_again = self.read_dta(path)

        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    @pytest.mark.parametrize("byteorder", [">", "<"])
    def test_bool_uint(self, byteorder, version):
        s0 = Series([0, 1, True], dtype=np.bool_)
        s1 = Series([0, 1, 100], dtype=np.uint8)
        s2 = Series([0, 1, 255], dtype=np.uint8)
        s3 = Series([0, 1, 2**15 - 100], dtype=np.uint16)
        s4 = Series([0, 1, 2**16 - 1], dtype=np.uint16)
        s5 = Series([0, 1, 2**31 - 100], dtype=np.uint32)
        s6 = Series([0, 1, 2**32 - 1], dtype=np.uint32)

        original = DataFrame(
            {"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6}
        )
        original.index.name = "index"
        expected = original.copy()
        expected.index = original.index.astype(np.int32)
        expected_types = (
            np.int8,
            np.int8,
            np.int16,
            np.int16,
            np.int32,
            np.int32,
            np.float64,
        )
        for c, t in zip(expected.columns, expected_types):
            expected[c] = expected[c].astype(t)

        with tm.ensure_clean() as path:
            original.to_stata(path, byteorder=byteorder, version=version)
            written_and_read_again = self.read_dta(path)

        written_and_read_again = written_and_read_again.set_index("index")
        tm.assert_frame_equal(written_and_read_again, expected)

    def test_variable_labels(self, datapath):
        with StataReader(datapath("io", "data", "stata", "stata7_115.dta")) as rdr:
            sr_115 = rdr.variable_labels()
        with StataReader(datapath("io", "data", "stata", "stata7_117.dta")) as rdr:
            sr_117 = rdr.variable_labels()
        keys = ("var1", "var2", "var3")
        labels = ("label1", "label2", "label3")
        for k, v in sr_115.items():
            assert k in sr_117
            assert v == sr_117[k]
            assert k in keys
            assert v in labels

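    # test_bool_uint captures how unsigned data is stored: each column is
    # promoted to the smallest signed type that holds its observed maximum
    # (bool -> int8, uint8 -> int8/int16, uint16 -> int16/int32,
    # uint32 -> int32), and a uint32 value beyond int32 falls back to float64.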
    def test_minimal_size_col(self):
        str_lens = (1, 100, 244)
        s = {}
        for str_len in str_lens:
            s["s" + str(str_len)] = Series(
                ["a" * str_len, "b" * str_len, "c" * str_len]
            )
        original = DataFrame(s)
        with tm.ensure_clean() as path:
            original.to_stata(path, write_index=False)

            with StataReader(path) as sr:
                sr._ensure_open()  # The `_*list` variables are initialized here
                for variable, fmt, typ in zip(sr._varlist, sr._fmtlist, sr._typlist):
                    assert int(variable[1:]) == int(fmt[1:-1])
                    assert int(variable[1:]) == typ

    def test_excessively_long_string(self):
        str_lens = (1, 244, 500)
        s = {}
        for str_len in str_lens:
            s["s" + str(str_len)] = Series(
                ["a" * str_len, "b" * str_len, "c" * str_len]
            )
        original = DataFrame(s)
        msg = (
            r"Fixed width strings in Stata \.dta files are limited to 244 "
            r"\(or fewer\)\ncharacters\. Column 's500' does not satisfy "
            r"this restriction\. Use the\n'version=117' parameter to write "
            r"the newer \(Stata 13 and later\) format\."
        )
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                original.to_stata(path)

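    # Fixed-width string columns (str1..str244) cap out at 244 characters in
    # the legacy formats; version 117 introduced the strL type for longer
    # values, which is why the error above steers users toward version=117.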
    def test_missing_value_generator(self):
        types = ("b", "h", "l")
        df = DataFrame([[0.0]], columns=["float_"])
        with tm.ensure_clean() as path:
            df.to_stata(path)
            with StataReader(path) as rdr:
                valid_range = rdr.VALID_RANGE
        expected_values = ["." + chr(97 + i) for i in range(26)]
        expected_values.insert(0, ".")
        for t in types:
            offset = valid_range[t][1]
            for i in range(27):
                val = StataMissingValue(offset + 1 + i)
                assert val.string == expected_values[i]

        # Test extremes for floats
        val = StataMissingValue(struct.unpack("<f", b"\x00\x00\x00\x7f")[0])
        assert val.string == "."
        val = StataMissingValue(struct.unpack("<f", b"\x00\xd0\x00\x7f")[0])
        assert val.string == ".z"

        # Test extremes for doubles
        val = StataMissingValue(
            struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
        )
        assert val.string == "."
        val = StataMissingValue(
            struct.unpack("<d", b"\x00\x00\x00\x00\x00\x1a\xe0\x7f")[0]
        )
        assert val.string == ".z"

    @pytest.mark.parametrize("file", ["stata8_113", "stata8_115", "stata8_117"])
    def test_missing_value_conversion(self, file, datapath):
        columns = ["int8_", "int16_", "int32_", "float32_", "float64_"]
        smv = StataMissingValue(101)
        keys = sorted(smv.MISSING_VALUES.keys())
        data = []
        for i in range(27):
            row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
            data.append(row)
        expected = DataFrame(data, columns=columns)

        parsed = read_stata(
            datapath("io", "data", "stata", f"{file}.dta"), convert_missing=True
        )
        tm.assert_frame_equal(parsed, expected)

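    # Both tests above rely on the same layout: for each numeric type, Stata
    # reserves the 27 values just past the top of VALID_RANGE for the system
    # missing value "." followed by the extended missings ".a" through ".z",
    # hence the loops over range(27).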
    def test_big_dates(self, datapath):
        yr = [1960, 2000, 9999, 100, 2262, 1677]
        mo = [1, 1, 12, 1, 4, 9]
        dd = [1, 1, 31, 1, 22, 23]
        hr = [0, 0, 23, 0, 0, 0]
        mm = [0, 0, 59, 0, 0, 0]
        ss = [0, 0, 59, 0, 0, 0]
        expected = []
        for year, month, day, hour, minute, second in zip(yr, mo, dd, hr, mm, ss):
            row = []
            for j in range(7):
                if j == 0:
                    row.append(datetime(year, month, day, hour, minute, second))
                elif j == 6:
                    row.append(datetime(year, 1, 1))
                else:
                    row.append(datetime(year, month, day))
            expected.append(row)
        expected.append([pd.NaT] * 7)
        columns = [
            "date_tc",
            "date_td",
            "date_tw",
            "date_tm",
            "date_tq",
            "date_th",
            "date_ty",
        ]

        # Fixes for weekly, quarterly, half, year
        expected[2][2] = datetime(9999, 12, 24)
        expected[2][3] = datetime(9999, 12, 1)
        expected[2][4] = datetime(9999, 10, 1)
        expected[2][5] = datetime(9999, 7, 1)
        expected[4][2] = datetime(2262, 4, 16)
        expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
        expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
        expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677, 10, 1)
        expected[5][5] = expected[5][6] = datetime(1678, 1, 1)

        expected = DataFrame(expected, columns=columns, dtype=object)
        parsed_115 = read_stata(datapath("io", "data", "stata", "stata9_115.dta"))
        parsed_117 = read_stata(datapath("io", "data", "stata", "stata9_117.dta"))
        tm.assert_frame_equal(expected, parsed_115, check_datetimelike_compat=True)
        tm.assert_frame_equal(expected, parsed_117, check_datetimelike_compat=True)

        date_conversion = {c: c[-2:] for c in columns}
        with tm.ensure_clean() as path:
            expected.index.name = "index"
            expected.to_stata(path, convert_dates=date_conversion)
            written_and_read_again = self.read_dta(path)

        tm.assert_frame_equal(
            written_and_read_again.set_index("index"),
            expected.set_index(expected.index.astype(np.int32)),
            check_datetimelike_compat=True,
        )

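    # The 1677 and 2262 rows above bracket the datetime64[ns] range
    # (pd.Timestamp.min/max), which is why the expected frame is built with
    # object dtype and every comparison passes check_datetimelike_compat=True.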
    def test_dtype_conversion(self, datapath):
        expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
        expected["byte_"] = expected["byte_"].astype(np.int8)
        expected["int_"] = expected["int_"].astype(np.int16)
        expected["long_"] = expected["long_"].astype(np.int32)
        expected["float_"] = expected["float_"].astype(np.float32)
        expected["double_"] = expected["double_"].astype(np.float64)
        expected["date_td"] = expected["date_td"].apply(
            datetime.strptime, args=("%Y-%m-%d",)
        )

        no_conversion = read_stata(
            datapath("io", "data", "stata", "stata6_117.dta"), convert_dates=True
        )
        tm.assert_frame_equal(expected, no_conversion)

        conversion = read_stata(
            datapath("io", "data", "stata", "stata6_117.dta"),
            convert_dates=True,
            preserve_dtypes=False,
        )

        # read_csv types are the same
        expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
        expected["date_td"] = expected["date_td"].apply(
            datetime.strptime, args=("%Y-%m-%d",)
        )

        tm.assert_frame_equal(expected, conversion)

    def test_drop_column(self, datapath):
        expected = self.read_csv(datapath("io", "data", "stata", "stata6.csv"))
        expected["byte_"] = expected["byte_"].astype(np.int8)
        expected["int_"] = expected["int_"].astype(np.int16)
        expected["long_"] = expected["long_"].astype(np.int32)
        expected["float_"] = expected["float_"].astype(np.float32)
        expected["double_"] = expected["double_"].astype(np.float64)
        expected["date_td"] = expected["date_td"].apply(
            datetime.strptime, args=("%Y-%m-%d",)
        )

        columns = ["byte_", "int_", "long_"]
        expected = expected[columns]
        dropped = read_stata(
            datapath("io", "data", "stata", "stata6_117.dta"),
            convert_dates=True,
            columns=columns,
        )

        tm.assert_frame_equal(expected, dropped)

        # See PR 10757
        columns = ["int_", "long_", "byte_"]
        expected = expected[columns]
        reordered = read_stata(
            datapath("io", "data", "stata", "stata6_117.dta"),
            convert_dates=True,
            columns=columns,
        )
        tm.assert_frame_equal(expected, reordered)

        msg = "columns contains duplicate entries"
        with pytest.raises(ValueError, match=msg):
            columns = ["byte_", "byte_"]
            read_stata(
                datapath("io", "data", "stata", "stata6_117.dta"),
                convert_dates=True,
                columns=columns,
            )

        msg = "The following columns were not found in the Stata data set: not_found"
        with pytest.raises(ValueError, match=msg):
            columns = ["byte_", "int_", "long_", "not_found"]
            read_stata(
                datapath("io", "data", "stata", "stata6_117.dta"),
                convert_dates=True,
                columns=columns,
            )

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    @pytest.mark.filterwarnings(
        "ignore:\\nStata value:pandas.io.stata.ValueLabelTypeMismatch"
    )
    def test_categorical_writing(self, version):
        original = DataFrame.from_records(
            [
                ["one", "ten", "one", "one", "one", 1],
                ["two", "nine", "two", "two", "two", 2],
                ["three", "eight", "three", "three", "three", 3],
                ["four", "seven", 4, "four", "four", 4],
                ["five", "six", 5, np.nan, "five", 5],
                ["six", "five", 6, np.nan, "six", 6],
                ["seven", "four", 7, np.nan, "seven", 7],
                ["eight", "three", 8, np.nan, "eight", 8],
                ["nine", "two", 9, np.nan, "nine", 9],
                ["ten", "one", "ten", np.nan, "ten", 10],
            ],
            columns=[
                "fully_labeled",
                "fully_labeled2",
                "incompletely_labeled",
                "labeled_with_missings",
                "float_labelled",
                "unlabeled",
            ],
        )
        expected = original.copy()

        # these are all categoricals
        original = pd.concat(
            [original[col].astype("category") for col in original], axis=1
        )
        expected.index = expected.index.set_names("index").astype(np.int32)

        expected["incompletely_labeled"] = expected["incompletely_labeled"].apply(str)
        expected["unlabeled"] = expected["unlabeled"].apply(str)
        for col in expected:
            orig = expected[col].copy()

            cat = orig.astype("category")._values
            cat = cat.as_ordered()
            if col == "unlabeled":
                cat = cat.set_categories(orig, ordered=True)

            cat.categories.rename(None, inplace=True)

            expected[col] = cat

        with tm.ensure_clean() as path:
            original.to_stata(path, version=version)
            written_and_read_again = self.read_dta(path)

        res = written_and_read_again.set_index("index")
        tm.assert_frame_equal(res, expected)

    def test_categorical_warnings_and_errors(self):
        # Warning for non-string labels
        # Error for labels too long
        original = DataFrame.from_records(
            [["a" * 10000], ["b" * 10000], ["c" * 10000], ["d" * 10000]],
            columns=["Too_long"],
        )

        original = pd.concat(
            [original[col].astype("category") for col in original], axis=1
        )
        with tm.ensure_clean() as path:
            msg = (
                "Stata value labels for a single variable must have "
                r"a combined length less than 32,000 characters\."
            )
            with pytest.raises(ValueError, match=msg):
                original.to_stata(path)

            original = DataFrame.from_records(
                [["a"], ["b"], ["c"], ["d"], [1]], columns=["Too_long"]
            )
            original = pd.concat(
                [original[col].astype("category") for col in original], axis=1
            )

            with tm.assert_produces_warning(ValueLabelTypeMismatch):
                original.to_stata(path)
                # should get a warning for mixed content

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_categorical_with_stata_missing_values(self, version):
        values = [["a" + str(i)] for i in range(120)]
        values.append([np.nan])
        original = DataFrame.from_records(values, columns=["many_labels"])
        original = pd.concat(
            [original[col].astype("category") for col in original], axis=1
        )
        original.index.name = "index"
        with tm.ensure_clean() as path:
            original.to_stata(path, version=version)
            written_and_read_again = self.read_dta(path)

        res = written_and_read_again.set_index("index")

        expected = original.copy()
        for col in expected:
            cat = expected[col]._values
            new_cats = cat.remove_unused_categories().categories
            cat = cat.set_categories(new_cats, ordered=True)
            expected[col] = cat
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(res, expected)

    @pytest.mark.parametrize("file", ["stata10_115", "stata10_117"])
    def test_categorical_order(self, file, datapath):
        # Directly construct using expected codes
        # Format is is_cat, col_name, labels (in order), underlying data
        expected = [
            (True, "ordered", ["a", "b", "c", "d", "e"], np.arange(5)),
            (True, "reverse", ["a", "b", "c", "d", "e"], np.arange(5)[::-1]),
            (True, "noorder", ["a", "b", "c", "d", "e"], np.array([2, 1, 4, 0, 3])),
            (True, "floating", ["a", "b", "c", "d", "e"], np.arange(0, 5)),
            (True, "float_missing", ["a", "d", "e"], np.array([0, 1, 2, -1, -1])),
            (False, "nolabel", [1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
            (True, "int32_mixed", ["d", 2, "e", "b", "a"], np.arange(5)),
        ]
        cols = []
        for is_cat, col, labels, codes in expected:
            if is_cat:
                cols.append(
                    (col, pd.Categorical.from_codes(codes, labels, ordered=True))
                )
            else:
                cols.append((col, Series(labels, dtype=np.float32)))
        expected = DataFrame.from_dict(dict(cols))

        # Read with and without categoricals, ensure order is identical
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = read_stata(file)
        tm.assert_frame_equal(expected, parsed)

        # Check identity of codes
        for col in expected:
            if isinstance(expected[col].dtype, CategoricalDtype):
                tm.assert_series_equal(expected[col].cat.codes, parsed[col].cat.codes)
                tm.assert_index_equal(
                    expected[col].cat.categories, parsed[col].cat.categories
                )

    @pytest.mark.parametrize("file", ["stata11_115", "stata11_117"])
    def test_categorical_sorting(self, file, datapath):
        parsed = read_stata(datapath("io", "data", "stata", f"{file}.dta"))

        # Sort based on codes, not strings
        parsed = parsed.sort_values("srh", na_position="first")

        # Don't sort index
        parsed.index = pd.RangeIndex(len(parsed))
        codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
        categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
        cat = pd.Categorical.from_codes(
            codes=codes, categories=categories, ordered=True
        )
        expected = Series(cat, name="srh")
        tm.assert_series_equal(expected, parsed["srh"])

    @pytest.mark.parametrize("file", ["stata10_115", "stata10_117"])
    def test_categorical_ordering(self, file, datapath):
        file = datapath("io", "data", "stata", f"{file}.dta")
        parsed = read_stata(file)

        parsed_unordered = read_stata(file, order_categoricals=False)
        for col in parsed:
            if not isinstance(parsed[col].dtype, CategoricalDtype):
                continue
            assert parsed[col].cat.ordered
            assert not parsed_unordered[col].cat.ordered

    @pytest.mark.filterwarnings("ignore::UserWarning")
    @pytest.mark.parametrize(
        "file",
        [
            "stata1_117",
            "stata2_117",
            "stata3_117",
            "stata4_117",
            "stata5_117",
            "stata6_117",
            "stata7_117",
            "stata8_117",
            "stata9_117",
            "stata10_117",
            "stata11_117",
        ],
    )
    @pytest.mark.parametrize("chunksize", [1, 2])
    @pytest.mark.parametrize("convert_categoricals", [False, True])
    @pytest.mark.parametrize("convert_dates", [False, True])
    def test_read_chunks_117(
        self, file, chunksize, convert_categoricals, convert_dates, datapath
    ):
        fname = datapath("io", "data", "stata", f"{file}.dta")

        parsed = read_stata(
            fname,
            convert_categoricals=convert_categoricals,
            convert_dates=convert_dates,
        )
        with read_stata(
            fname,
            iterator=True,
            convert_categoricals=convert_categoricals,
            convert_dates=convert_dates,
        ) as itr:
            pos = 0
            for j in range(5):
                try:
                    chunk = itr.read(chunksize)
                except StopIteration:
                    break
                from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
                from_frame = self._convert_categorical(from_frame)
                tm.assert_frame_equal(
                    from_frame, chunk, check_dtype=False, check_datetimelike_compat=True
                )
                pos += chunksize

    @staticmethod
    def _convert_categorical(from_frame: DataFrame) -> DataFrame:
        """
        Emulate the categorical casting behavior we expect from roundtripping.
        """
        for col in from_frame:
            ser = from_frame[col]
            if isinstance(ser.dtype, CategoricalDtype):
                cat = ser._values.remove_unused_categories()
                if cat.categories.dtype == object:
                    categories = pd.Index._with_infer(cat.categories._values)
                    cat = cat.set_categories(categories)
                from_frame[col] = cat
        return from_frame

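    # _convert_categorical mirrors what a chunked read can actually observe: a
    # chunk only sees the labels present in its own rows, so unused categories
    # are dropped and object-dtype categories re-inferred before comparing a
    # chunk against the corresponding slice of the full parse.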
    def test_iterator(self, datapath):
        fname = datapath("io", "data", "stata", "stata3_117.dta")

        parsed = read_stata(fname)

        with read_stata(fname, iterator=True) as itr:
            chunk = itr.read(5)
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)

        with read_stata(fname, chunksize=5) as itr:
            chunk = list(itr)
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])

        with read_stata(fname, iterator=True) as itr:
            chunk = itr.get_chunk(5)
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)

        with read_stata(fname, chunksize=5) as itr:
            chunk = itr.get_chunk()
            tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)

        # GH12153
        with read_stata(fname, chunksize=4) as itr:
            from_chunks = pd.concat(itr)
        tm.assert_frame_equal(parsed, from_chunks)

    @pytest.mark.filterwarnings("ignore::UserWarning")
    @pytest.mark.parametrize(
        "file",
        [
            "stata2_115",
            "stata3_115",
            "stata4_115",
            "stata5_115",
            "stata6_115",
            "stata7_115",
            "stata8_115",
            "stata9_115",
            "stata10_115",
            "stata11_115",
        ],
    )
    @pytest.mark.parametrize("chunksize", [1, 2])
    @pytest.mark.parametrize("convert_categoricals", [False, True])
    @pytest.mark.parametrize("convert_dates", [False, True])
    def test_read_chunks_115(
        self, file, chunksize, convert_categoricals, convert_dates, datapath
    ):
        fname = datapath("io", "data", "stata", f"{file}.dta")

        # Read the whole file
        parsed = read_stata(
            fname,
            convert_categoricals=convert_categoricals,
            convert_dates=convert_dates,
        )

        # Compare to what we get when reading by chunk
        with read_stata(
            fname,
            iterator=True,
            convert_dates=convert_dates,
            convert_categoricals=convert_categoricals,
        ) as itr:
            pos = 0
            for j in range(5):
                try:
                    chunk = itr.read(chunksize)
                except StopIteration:
                    break
                from_frame = parsed.iloc[pos : pos + chunksize, :].copy()
                from_frame = self._convert_categorical(from_frame)
                tm.assert_frame_equal(
                    from_frame, chunk, check_dtype=False, check_datetimelike_compat=True
                )
                pos += chunksize

    def test_read_chunks_columns(self, datapath):
        fname = datapath("io", "data", "stata", "stata3_117.dta")
        columns = ["quarter", "cpi", "m1"]
        chunksize = 2

        parsed = read_stata(fname, columns=columns)
        with read_stata(fname, iterator=True) as itr:
            pos = 0
            for j in range(5):
                chunk = itr.read(chunksize, columns=columns)
                if chunk is None:
                    break
                from_frame = parsed.iloc[pos : pos + chunksize, :]
                tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
                pos += chunksize

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_write_variable_labels(self, version, mixed_frame):
        # GH 13631, add support for writing variable labels
        mixed_frame.index.name = "index"
        variable_labels = {"a": "City Rank", "b": "City Exponent", "c": "City"}
        with tm.ensure_clean() as path:
            mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
            with StataReader(path) as sr:
                read_labels = sr.variable_labels()
            expected_labels = {
                "index": "",
                "a": "City Rank",
                "b": "City Exponent",
                "c": "City",
            }
            assert read_labels == expected_labels

        variable_labels["index"] = "The Index"
        with tm.ensure_clean() as path:
            mixed_frame.to_stata(path, variable_labels=variable_labels, version=version)
            with StataReader(path) as sr:
                read_labels = sr.variable_labels()
                assert read_labels == variable_labels

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_invalid_variable_labels(self, version, mixed_frame):
        mixed_frame.index.name = "index"
        variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
        with tm.ensure_clean() as path:
            msg = "Variable labels must be 80 characters or fewer"
            with pytest.raises(ValueError, match=msg):
                mixed_frame.to_stata(
                    path, variable_labels=variable_labels, version=version
                )

    @pytest.mark.parametrize("version", [114, 117])
    def test_invalid_variable_label_encoding(self, version, mixed_frame):
        mixed_frame.index.name = "index"
        variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
        variable_labels["a"] = "invalid character Œ"
        with tm.ensure_clean() as path:
            with pytest.raises(
                ValueError, match="Variable labels must contain only characters"
            ):
                mixed_frame.to_stata(
                    path, variable_labels=variable_labels, version=version
                )

    def test_write_variable_label_errors(self, mixed_frame):
        values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"]

        variable_labels_utf8 = {
            "a": "City Rank",
            "b": "City Exponent",
            "c": "".join(values),
        }

        msg = (
            "Variable labels must contain only characters that can be "
            "encoded in Latin-1"
        )
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                mixed_frame.to_stata(path, variable_labels=variable_labels_utf8)

        variable_labels_long = {
            "a": "City Rank",
            "b": "City Exponent",
            "c": "A very, very, very long variable label "
            "that is too long for Stata which means "
            "that it has more than 80 characters",
        }

        msg = "Variable labels must be 80 characters or fewer"
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                mixed_frame.to_stata(path, variable_labels=variable_labels_long)

    def test_default_date_conversion(self):
        # GH 12259
        dates = [
            dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
            dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
            dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
        ]
        original = DataFrame(
            {
                "nums": [1.0, 2.0, 3.0],
                "strs": ["apple", "banana", "cherry"],
                "dates": dates,
            }
        )

        with tm.ensure_clean() as path:
            original.to_stata(path, write_index=False)
            reread = read_stata(path, convert_dates=True)
            tm.assert_frame_equal(original, reread)

            original.to_stata(path, write_index=False, convert_dates={"dates": "tc"})
            direct = read_stata(path, convert_dates=True)
            tm.assert_frame_equal(reread, direct)

            dates_idx = original.columns.tolist().index("dates")
            original.to_stata(path, write_index=False, convert_dates={dates_idx: "tc"})
            direct = read_stata(path, convert_dates=True)
            tm.assert_frame_equal(reread, direct)

    def test_unsupported_type(self):
        original = DataFrame({"a": [1 + 2j, 2 + 4j]})

        msg = "Data type complex128 not supported"
        with pytest.raises(NotImplementedError, match=msg):
            with tm.ensure_clean() as path:
                original.to_stata(path)

    def test_unsupported_datetype(self):
        dates = [
            dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
            dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
            dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
        ]
        original = DataFrame(
            {
                "nums": [1.0, 2.0, 3.0],
                "strs": ["apple", "banana", "cherry"],
                "dates": dates,
            }
        )

        msg = "Format %tC not implemented"
        with pytest.raises(NotImplementedError, match=msg):
            with tm.ensure_clean() as path:
                original.to_stata(path, convert_dates={"dates": "tC"})

        dates = pd.date_range("1-1-1990", periods=3, tz="Asia/Hong_Kong")
        original = DataFrame(
            {
                "nums": [1.0, 2.0, 3.0],
                "strs": ["apple", "banana", "cherry"],
                "dates": dates,
            }
        )
        with pytest.raises(NotImplementedError, match="Data type datetime64"):
            with tm.ensure_clean() as path:
                original.to_stata(path)

    def test_repeated_column_labels(self, datapath):
        # GH 13923, 25772
        msg = """
Value labels for column ethnicsn are not unique. These cannot be converted to
pandas categoricals.

Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.

The repeated labels are:\n-+\nwolof
"""
        with pytest.raises(ValueError, match=msg):
            read_stata(
                datapath("io", "data", "stata", "stata15.dta"),
                convert_categoricals=True,
            )

    def test_stata_111(self, datapath):
        # 111 is an old version but still used by current versions of
        # SAS when exporting to Stata format. We do not know of any
        # on-line documentation for this version.
        df = read_stata(datapath("io", "data", "stata", "stata7_111.dta"))
        original = DataFrame(
            {
                "y": [1, 1, 1, 1, 1, 0, 0, np.nan, 0, 0],
                "x": [1, 2, 1, 3, np.nan, 4, 3, 5, 1, 6],
                "w": [2, np.nan, 5, 2, 4, 4, 3, 1, 2, 3],
                "z": ["a", "b", "c", "d", "e", "", "g", "h", "i", "j"],
            }
        )
        original = original[["y", "x", "w", "z"]]
        tm.assert_frame_equal(original, df)

    def test_out_of_range_double(self):
        # GH 14618
        df = DataFrame(
            {
                "ColumnOk": [0.0, np.finfo(np.double).eps, 4.49423283715579e307],
                "ColumnTooBig": [0.0, np.finfo(np.double).eps, np.finfo(np.double).max],
            }
        )
        msg = (
            r"Column ColumnTooBig has a maximum value \(.+\) outside the range "
            r"supported by Stata \(.+\)"
        )
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                df.to_stata(path)

    def test_out_of_range_float(self):
        original = DataFrame(
            {
                "ColumnOk": [
                    0.0,
                    np.finfo(np.float32).eps,
                    np.finfo(np.float32).max / 10.0,
                ],
                "ColumnTooBig": [
                    0.0,
                    np.finfo(np.float32).eps,
                    np.finfo(np.float32).max,
                ],
            }
        )
        original.index.name = "index"
        for col in original:
            original[col] = original[col].astype(np.float32)

        with tm.ensure_clean() as path:
            original.to_stata(path)
            reread = read_stata(path)

        original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64)
        expected = original.copy()
        expected.index = expected.index.astype(np.int32)
        tm.assert_frame_equal(reread.set_index("index"), expected)

    @pytest.mark.parametrize("infval", [np.inf, -np.inf])
    def test_inf(self, infval):
        # GH 45350
        df = DataFrame({"WithoutInf": [0.0, 1.0], "WithInf": [2.0, infval]})
        msg = (
            "Column WithInf contains infinity or -infinity"
            "which is outside the range supported by Stata."
        )
        with pytest.raises(ValueError, match=msg):
            with tm.ensure_clean() as path:
                df.to_stata(path)

    def test_path_pathlib(self):
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=pd.Index(list("ABCD"), dtype=object),
            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        df.index.name = "index"
        reader = lambda x: read_stata(x).set_index("index")
        result = tm.round_trip_pathlib(df.to_stata, reader)
        tm.assert_frame_equal(df, result)

    def test_pickle_path_localpath(self):
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=pd.Index(list("ABCD"), dtype=object),
            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        df.index.name = "index"
        reader = lambda x: read_stata(x).set_index("index")
        result = tm.round_trip_localpath(df.to_stata, reader)
        tm.assert_frame_equal(df, result)

    @pytest.mark.parametrize("write_index", [True, False])
    def test_value_labels_iterator(self, write_index):
        # GH 16923
        d = {"A": ["B", "E", "C", "A", "E"]}
        df = DataFrame(data=d)
        df["A"] = df["A"].astype("category")
        with tm.ensure_clean() as path:
            df.to_stata(path, write_index=write_index)

            with read_stata(path, iterator=True) as dta_iter:
                value_labels = dta_iter.value_labels()
                assert value_labels == {"A": {0: "A", 1: "B", 2: "C", 3: "E"}}

    def test_set_index(self):
        # GH 17328
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=pd.Index(list("ABCD"), dtype=object),
            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        df.index.name = "index"
        with tm.ensure_clean() as path:
            df.to_stata(path)
            reread = read_stata(path, index_col="index")
        tm.assert_frame_equal(df, reread)

    @pytest.mark.parametrize(
        "column", ["ms", "day", "week", "month", "qtr", "half", "yr"]
    )
    def test_date_parsing_ignores_format_details(self, column, datapath):
        # GH 17797
        #
        # Test that display formats are ignored when determining if a numeric
        # column is a date value.
        #
        # All date types are stored as numbers and the format associated with
        # the column denotes both the type of the date and the display format.
        #
        # STATA supports 9 date types which each have distinct units. We test 7
        # of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that
        # accounts for leap seconds and %tb relies on STATA's business calendar.
        df = read_stata(datapath("io", "data", "stata", "stata13_dates.dta"))
        unformatted = df.loc[0, column]
        formatted = df.loc[0, column + "_fmt"]
        assert unformatted == formatted

    def test_writer_117(self):
        original = DataFrame(
            data=[
                [
                    "string",
                    "object",
                    1,
                    1,
                    1,
                    1.1,
                    1.1,
                    np.datetime64("2003-12-25"),
                    "a",
                    "a" * 2045,
                    "a" * 5000,
                    "a",
                ],
                [
                    "string-1",
                    "object-1",
                    1,
                    1,
                    1,
                    1.1,
                    1.1,
                    np.datetime64("2003-12-26"),
                    "b",
                    "b" * 2045,
                    "",
                    "",
                ],
            ],
            columns=[
                "string",
                "object",
                "int8",
                "int16",
                "int32",
                "float32",
                "float64",
                "datetime",
                "s1",
                "s2045",
                "srtl",
                "forced_strl",
            ],
        )
        original["object"] = Series(original["object"], dtype=object)
        original["int8"] = Series(original["int8"], dtype=np.int8)
        original["int16"] = Series(original["int16"], dtype=np.int16)
        original["int32"] = original["int32"].astype(np.int32)
        original["float32"] = Series(original["float32"], dtype=np.float32)
        original.index.name = "index"
        original.index = original.index.astype(np.int32)
        copy = original.copy()
        with tm.ensure_clean() as path:
            original.to_stata(
                path,
                convert_dates={"datetime": "tc"},
                convert_strl=["forced_strl"],
                version=117,
            )
            written_and_read_again = self.read_dta(path)
            # original.index is np.int32, read index is np.int64
            tm.assert_frame_equal(
                written_and_read_again.set_index("index"),
                original,
                check_index_type=False,
            )
            tm.assert_frame_equal(original, copy)

    def test_convert_strl_name_swap(self):
        original = DataFrame(
            [["a" * 3000, "A", "apple"], ["b" * 1000, "B", "banana"]],
            columns=["long1" * 10, "long", 1],
        )
        original.index.name = "index"

        with tm.assert_produces_warning(InvalidColumnName):
            with tm.ensure_clean() as path:
                original.to_stata(path, convert_strl=["long", 1], version=117)
                reread = self.read_dta(path)
                reread = reread.set_index("index")
                reread.columns = original.columns
                tm.assert_frame_equal(reread, original, check_index_type=False)

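    # test_writer_117 exercises the full version-117 type grid in one frame;
    # note that the 5,000-character column can only be stored as a strL, while
    # convert_strl forces "forced_strl" into one even though its values would
    # fit a fixed-width column.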
    def test_invalid_date_conversion(self):
        # GH 12259
        dates = [
            dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
            dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
            dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
        ]
        original = DataFrame(
            {
                "nums": [1.0, 2.0, 3.0],
                "strs": ["apple", "banana", "cherry"],
                "dates": dates,
            }
        )

        with tm.ensure_clean() as path:
            msg = "convert_dates key must be a column or an integer"
            with pytest.raises(ValueError, match=msg):
                original.to_stata(path, convert_dates={"wrong_name": "tc"})

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_nonfile_writing(self, version):
        # GH 21041
        bio = io.BytesIO()
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=pd.Index(list("ABCD"), dtype=object),
            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        df.index.name = "index"
        with tm.ensure_clean() as path:
            df.to_stata(bio, version=version)
            bio.seek(0)
            with open(path, "wb") as dta:
                dta.write(bio.read())
            reread = read_stata(path, index_col="index")
        tm.assert_frame_equal(df, reread)

    def test_gzip_writing(self):
        # writing version 117 requires seek and cannot be used with gzip
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=pd.Index(list("ABCD"), dtype=object),
            index=pd.Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        df.index.name = "index"
        with tm.ensure_clean() as path:
            with gzip.GzipFile(path, "wb") as gz:
                df.to_stata(gz, version=114)
            with gzip.GzipFile(path, "rb") as gz:
                reread = read_stata(gz, index_col="index")
        tm.assert_frame_equal(df, reread)

    def test_unicode_dta_118(self, datapath):
        unicode_df = self.read_dta(datapath("io", "data", "stata", "stata16_118.dta"))

        columns = ["utf8", "latin1", "ascii", "utf8_strl", "ascii_strl"]
        values = [
            ["ραηδας", "PÄNDÄS", "p", "ραηδας", "p"],
            ["ƤĀńĐąŜ", "Ö", "a", "ƤĀńĐąŜ", "a"],
            ["ᴘᴀᴎᴅᴀS", "Ü", "n", "ᴘᴀᴎᴅᴀS", "n"],
            [" ", " ", "d", " ", "d"],
            [" ", "", "a", " ", "a"],
            ["", "", "s", "", "s"],
            ["", "", " ", "", " "],
        ]
        expected = DataFrame(values, columns=columns)

        tm.assert_frame_equal(unicode_df, expected)

    def test_mixed_string_strl(self):
        # GH 23633
        output = [{"mixed": "string" * 500, "number": 0}, {"mixed": None, "number": 1}]
        output = DataFrame(output)
        output.number = output.number.astype("int32")

        with tm.ensure_clean() as path:
            output.to_stata(path, write_index=False, version=117)
            reread = read_stata(path)
            expected = output.fillna("")
            tm.assert_frame_equal(reread, expected)

            # Check strl supports all None (null)
            output["mixed"] = None
            output.to_stata(
                path, write_index=False, convert_strl=["mixed"], version=117
            )
            reread = read_stata(path)
            expected = output.fillna("")
            tm.assert_frame_equal(reread, expected)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_all_none_exception(self, version):
        output = [{"none": "none", "number": 0}, {"none": None, "number": 1}]
        output = DataFrame(output)
        output["none"] = None
        with tm.ensure_clean() as path:
            with pytest.raises(ValueError, match="Column `none` cannot be exported"):
                output.to_stata(path, version=version)

    @pytest.mark.parametrize("version", [114, 117, 118, 119, None])
    def test_invalid_file_not_written(self, version):
        content = "Here is one __�__ Another one __·__ Another one __½__"
        df = DataFrame([content], columns=["invalid"])
        with tm.ensure_clean() as path:
            msg1 = (
                r"'latin-1' codec can't encode character '\\ufffd' "
                r"in position 14: ordinal not in range\(256\)"
            )
            msg2 = (
                "'ascii' codec can't decode byte 0xef in position 14: "
                r"ordinal not in range\(128\)"
            )
            with pytest.raises(UnicodeEncodeError, match=f"{msg1}|{msg2}"):
                df.to_stata(path)

    def test_strl_latin1(self):
        # GH 23573, correct GSO data to reflect correct size
        output = DataFrame(
            [["pandas"] * 2, ["þâÑÐŧ"] * 2], columns=["var_str", "var_strl"]
        )

        with tm.ensure_clean() as path:
            output.to_stata(path, version=117, convert_strl=["var_strl"])
            with open(path, "rb") as reread:
                content = reread.read()
                expected = "þâÑÐŧ"
                assert expected.encode("latin-1") in content
                assert expected.encode("utf-8") in content
                gsos = content.split(b"strls")[1][1:-2]
                for gso in gsos.split(b"GSO")[1:]:
                    val = gso.split(b"\x00")[-2]
                    size = gso[gso.find(b"\x82") + 1]
                    assert len(val) == size - 1

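    # The byte-level assertions above walk the strL ("GSO") heap directly:
    # each entry carries a length byte after \x82 that must cover the stored
    # value plus its null terminator, which is what len(val) == size - 1
    # checks.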
    def test_encoding_latin1_118(self, datapath):
        # GH 25960
        msg = """
One or more strings in the dta file could not be decoded using utf-8, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
        # Move path outside of read_stata, or else assert_produces_warning
        # will block pytest's skip mechanism from triggering (failing the test)
        # if the path is not present
        path = datapath("io", "data", "stata", "stata1_encoding_118.dta")
        with tm.assert_produces_warning(UnicodeWarning, filter_level="once") as w:
            encoded = read_stata(path)
            # with filter_level="always", produces 151 warnings which can be slow
            assert len(w) == 1
            assert w[0].message.args[0] == msg

        expected = DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
        tm.assert_frame_equal(encoded, expected)

    @pytest.mark.slow
    def test_stata_119(self, datapath):
        # Gzipped since contains 32,999 variables and uncompressed is 20MiB
        # Just validate that the reader reports correct number of variables
        # to avoid high peak memory
        with gzip.open(
            datapath("io", "data", "stata", "stata1_119.dta.gz"), "rb"
        ) as gz:
            with StataReader(gz) as reader:
                reader._ensure_open()
                assert reader._nvar == 32999

    @pytest.mark.parametrize("version", [118, 119, None])
    def test_utf8_writer(self, version):
        cat = pd.Categorical(["a", "β", "ĉ"], ordered=True)
        data = DataFrame(
            [
                [1.0, 1, "ᴬ", "ᴀ relatively long ŝtring"],
                [2.0, 2, "ᴮ", ""],
                [3.0, 3, "ᴰ", None],
            ],
            columns=["Å", "β", "ĉ", "strls"],
        )
        data["ᴐᴬᵀ"] = cat
        variable_labels = {
            "Å": "apple",
            "β": "ᵈᵉᵊ",
            "ĉ": "ᴎტჄႲႳႴႶႺ",
            "strls": "Long Strings",
            "ᴐᴬᵀ": "",
        }
        data_label = "ᴅaᵀa-label"
        value_labels = {"β": {1: "label", 2: "æøå", 3: "ŋot valid latin-1"}}
        data["β"] = data["β"].astype(np.int32)
        with tm.ensure_clean() as path:
            writer = StataWriterUTF8(
                path,
                data,
                data_label=data_label,
                convert_strl=["strls"],
                variable_labels=variable_labels,
                write_index=False,
                version=version,
                value_labels=value_labels,
            )
            writer.write_file()
            reread_encoded = read_stata(path)
            # Missing is intentionally converted to empty strl
            data["strls"] = data["strls"].fillna("")
            # Variable with value labels is reread as categorical
            data["β"] = (
                data["β"].replace(value_labels["β"]).astype("category").cat.as_ordered()
            )
            tm.assert_frame_equal(data, reread_encoded)
            with StataReader(path) as reader:
                assert reader.data_label == data_label
                assert reader.variable_labels() == variable_labels

            data.to_stata(path, version=version, write_index=False)
            reread_to_stata = read_stata(path)
            tm.assert_frame_equal(data, reread_to_stata)

    def test_writer_118_exceptions(self):
        df = DataFrame(np.zeros((1, 33000), dtype=np.int8))
        with tm.ensure_clean() as path:
            with pytest.raises(ValueError, match="version must be either 118 or 119."):
                StataWriterUTF8(path, df, version=117)
        with tm.ensure_clean() as path:
            with pytest.raises(ValueError, match="You must use version 119"):
                StataWriterUTF8(path, df, version=118)

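    # StataWriterUTF8 targets the UTF-8 formats only: as the exceptions above
    # show, version must be 118 or 119, and a frame wider than the version-118
    # variable limit (33,000 columns here) has to be written as 119.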
    @pytest.mark.parametrize(
        "dtype_backend",
        ["numpy_nullable", pytest.param("pyarrow", marks=td.skip_if_no("pyarrow"))],
    )
    def test_read_write_ea_dtypes(self, dtype_backend):
        df = DataFrame(
            {
                "a": [1, 2, None],
                "b": ["a", "b", "c"],
                "c": [True, False, None],
                "d": [1.5, 2.5, 3.5],
                "e": pd.date_range("2020-12-31", periods=3, freq="D"),
            },
            index=pd.Index([0, 1, 2], name="index"),
        )
        df = df.convert_dtypes(dtype_backend=dtype_backend)

        with tm.ensure_clean() as path:
            df.to_stata(path)
            written_and_read_again = self.read_dta(path)

        expected = DataFrame(
            {
                "a": [1, 2, np.nan],
                "b": ["a", "b", "c"],
                "c": [1.0, 0, np.nan],
                "d": [1.5, 2.5, 3.5],
                "e": pd.date_range("2020-12-31", periods=3, freq="D"),
            },
            index=pd.Index([0, 1, 2], name="index", dtype=np.int32),
        )

        tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)


@pytest.mark.parametrize("version", [105, 108, 111, 113, 114])
def test_backward_compat(version, datapath):
    data_base = datapath("io", "data", "stata")
    ref = os.path.join(data_base, "stata-compat-118.dta")
    old = os.path.join(data_base, f"stata-compat-{version}.dta")
    expected = read_stata(ref)
    old_dta = read_stata(old)
    tm.assert_frame_equal(old_dta, expected, check_dtype=False)


def test_direct_read(datapath, monkeypatch):
    file_path = datapath("io", "data", "stata", "stata-compat-118.dta")

    # Test that opening a file path doesn't buffer the file.
    with StataReader(file_path) as reader:
        # Must not have been buffered to memory
        assert not reader.read().empty
        assert not isinstance(reader._path_or_buf, io.BytesIO)

    # Test that we use a given fp exactly, if possible.
    with open(file_path, "rb") as fp:
        with StataReader(fp) as reader:
            assert not reader.read().empty
            assert reader._path_or_buf is fp

    # Test that we use a given BytesIO exactly, if possible.
    with open(file_path, "rb") as fp:
        with io.BytesIO(fp.read()) as bio:
            with StataReader(bio) as reader:
                assert not reader.read().empty
                assert reader._path_or_buf is bio


def test_statareader_warns_when_used_without_context(datapath):
    file_path = datapath("io", "data", "stata", "stata-compat-118.dta")
    with tm.assert_produces_warning(
        ResourceWarning,
        match="without using a context manager",
    ):
        sr = StataReader(file_path)
        sr.read()
    with tm.assert_produces_warning(
        FutureWarning,
        match="is not part of the public API",
    ):
        sr.close()


@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
|
2011 |
+
@pytest.mark.parametrize("use_dict", [True, False])
|
2012 |
+
@pytest.mark.parametrize("infer", [True, False])
|
2013 |
+
def test_compression(compression, version, use_dict, infer, compression_to_extension):
|
2014 |
+
file_name = "dta_inferred_compression.dta"
|
2015 |
+
if compression:
|
2016 |
+
if use_dict:
|
2017 |
+
file_ext = compression
|
2018 |
+
else:
|
2019 |
+
file_ext = compression_to_extension[compression]
|
2020 |
+
file_name += f".{file_ext}"
|
2021 |
+
compression_arg = compression
|
2022 |
+
if infer:
|
2023 |
+
compression_arg = "infer"
|
2024 |
+
if use_dict:
|
2025 |
+
compression_arg = {"method": compression}
|
2026 |
+
|
2027 |
+
df = DataFrame(
|
2028 |
+
np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB")
|
2029 |
+
)
|
2030 |
+
df.index.name = "index"
|
2031 |
+
with tm.ensure_clean(file_name) as path:
|
2032 |
+
df.to_stata(path, version=version, compression=compression_arg)
|
2033 |
+
if compression == "gzip":
|
2034 |
+
with gzip.open(path, "rb") as comp:
|
2035 |
+
fp = io.BytesIO(comp.read())
|
2036 |
+
elif compression == "zip":
|
2037 |
+
with zipfile.ZipFile(path, "r") as comp:
|
2038 |
+
fp = io.BytesIO(comp.read(comp.filelist[0]))
|
2039 |
+
elif compression == "tar":
|
2040 |
+
with tarfile.open(path) as tar:
|
2041 |
+
fp = io.BytesIO(tar.extractfile(tar.getnames()[0]).read())
|
2042 |
+
elif compression == "bz2":
|
2043 |
+
with bz2.open(path, "rb") as comp:
|
2044 |
+
fp = io.BytesIO(comp.read())
|
2045 |
+
elif compression == "zstd":
|
2046 |
+
zstd = pytest.importorskip("zstandard")
|
2047 |
+
with zstd.open(path, "rb") as comp:
|
2048 |
+
fp = io.BytesIO(comp.read())
|
2049 |
+
elif compression == "xz":
|
2050 |
+
lzma = pytest.importorskip("lzma")
|
2051 |
+
with lzma.open(path, "rb") as comp:
|
2052 |
+
fp = io.BytesIO(comp.read())
|
2053 |
+
elif compression is None:
|
2054 |
+
fp = path
|
2055 |
+
reread = read_stata(fp, index_col="index")
|
2056 |
+
|
2057 |
+
expected = df.copy()
|
2058 |
+
expected.index = expected.index.astype(np.int32)
|
2059 |
+
tm.assert_frame_equal(reread, expected)
|
2060 |
+
|
2061 |
+
|
2062 |
+
@pytest.mark.parametrize("method", ["zip", "infer"])
|
2063 |
+
@pytest.mark.parametrize("file_ext", [None, "dta", "zip"])
|
2064 |
+
def test_compression_dict(method, file_ext):
|
2065 |
+
file_name = f"test.{file_ext}"
|
2066 |
+
archive_name = "test.dta"
|
2067 |
+
df = DataFrame(
|
2068 |
+
np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB")
|
2069 |
+
)
|
2070 |
+
df.index.name = "index"
|
2071 |
+
with tm.ensure_clean(file_name) as path:
|
2072 |
+
compression = {"method": method, "archive_name": archive_name}
|
2073 |
+
df.to_stata(path, compression=compression)
|
2074 |
+
if method == "zip" or file_ext == "zip":
|
2075 |
+
with zipfile.ZipFile(path, "r") as zp:
|
2076 |
+
assert len(zp.filelist) == 1
|
2077 |
+
assert zp.filelist[0].filename == archive_name
|
2078 |
+
fp = io.BytesIO(zp.read(zp.filelist[0]))
|
2079 |
+
else:
|
2080 |
+
fp = path
|
2081 |
+
reread = read_stata(fp, index_col="index")
|
2082 |
+
|
2083 |
+
expected = df.copy()
|
2084 |
+
expected.index = expected.index.astype(np.int32)
|
2085 |
+
tm.assert_frame_equal(reread, expected)
|
2086 |
+
|
2087 |
+
|
2088 |
+
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
|
2089 |
+
def test_chunked_categorical(version):
|
2090 |
+
df = DataFrame({"cats": Series(["a", "b", "a", "b", "c"], dtype="category")})
|
2091 |
+
df.index.name = "index"
|
2092 |
+
|
2093 |
+
expected = df.copy()
|
2094 |
+
expected.index = expected.index.astype(np.int32)
|
2095 |
+
|
2096 |
+
with tm.ensure_clean() as path:
|
2097 |
+
df.to_stata(path, version=version)
|
2098 |
+
with StataReader(path, chunksize=2, order_categoricals=False) as reader:
|
2099 |
+
for i, block in enumerate(reader):
|
2100 |
+
block = block.set_index("index")
|
2101 |
+
assert "cats" in block
|
2102 |
+
tm.assert_series_equal(
|
2103 |
+
block.cats, expected.cats.iloc[2 * i : 2 * (i + 1)]
|
2104 |
+
)
|
2105 |
+
|
2106 |
+
|
2107 |
+
def test_chunked_categorical_partial(datapath):
|
2108 |
+
dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta")
|
2109 |
+
values = ["a", "b", "a", "b", 3.0]
|
2110 |
+
with StataReader(dta_file, chunksize=2) as reader:
|
2111 |
+
with tm.assert_produces_warning(CategoricalConversionWarning):
|
2112 |
+
for i, block in enumerate(reader):
|
2113 |
+
assert list(block.cats) == values[2 * i : 2 * (i + 1)]
|
2114 |
+
if i < 2:
|
2115 |
+
idx = pd.Index(["a", "b"])
|
2116 |
+
else:
|
2117 |
+
idx = pd.Index([3.0], dtype="float64")
|
2118 |
+
tm.assert_index_equal(block.cats.cat.categories, idx)
|
2119 |
+
with tm.assert_produces_warning(CategoricalConversionWarning):
|
2120 |
+
with StataReader(dta_file, chunksize=5) as reader:
|
2121 |
+
large_chunk = reader.__next__()
|
2122 |
+
direct = read_stata(dta_file)
|
2123 |
+
tm.assert_frame_equal(direct, large_chunk)
|
2124 |
+
|
2125 |
+
|
2126 |
+
@pytest.mark.parametrize("chunksize", (-1, 0, "apple"))
|
2127 |
+
def test_iterator_errors(datapath, chunksize):
|
2128 |
+
dta_file = datapath("io", "data", "stata", "stata-dta-partially-labeled.dta")
|
2129 |
+
with pytest.raises(ValueError, match="chunksize must be a positive"):
|
2130 |
+
with StataReader(dta_file, chunksize=chunksize):
|
2131 |
+
pass
|
2132 |
+
|
2133 |
+
|
2134 |
+
def test_iterator_value_labels():
|
2135 |
+
# GH 31544
|
2136 |
+
values = ["c_label", "b_label"] + ["a_label"] * 500
|
2137 |
+
df = DataFrame({f"col{k}": pd.Categorical(values, ordered=True) for k in range(2)})
|
2138 |
+
with tm.ensure_clean() as path:
|
2139 |
+
df.to_stata(path, write_index=False)
|
2140 |
+
expected = pd.Index(["a_label", "b_label", "c_label"], dtype="object")
|
2141 |
+
with read_stata(path, chunksize=100) as reader:
|
2142 |
+
for j, chunk in enumerate(reader):
|
2143 |
+
for i in range(2):
|
2144 |
+
tm.assert_index_equal(chunk.dtypes.iloc[i].categories, expected)
|
2145 |
+
tm.assert_frame_equal(chunk, df.iloc[j * 100 : (j + 1) * 100])
|
2146 |
+
|
2147 |
+
|
2148 |
+
def test_precision_loss():
|
2149 |
+
df = DataFrame(
|
2150 |
+
[[sum(2**i for i in range(60)), sum(2**i for i in range(52))]],
|
2151 |
+
columns=["big", "little"],
|
2152 |
+
)
|
2153 |
+
with tm.ensure_clean() as path:
|
2154 |
+
with tm.assert_produces_warning(
|
2155 |
+
PossiblePrecisionLoss, match="Column converted from int64 to float64"
|
2156 |
+
):
|
2157 |
+
df.to_stata(path, write_index=False)
|
2158 |
+
reread = read_stata(path)
|
2159 |
+
expected_dt = Series([np.float64, np.float64], index=["big", "little"])
|
2160 |
+
tm.assert_series_equal(reread.dtypes, expected_dt)
|
2161 |
+
assert reread.loc[0, "little"] == df.loc[0, "little"]
|
2162 |
+
assert reread.loc[0, "big"] == float(df.loc[0, "big"])
|
2163 |
+
|
2164 |
+
|
2165 |
+
def test_compression_roundtrip(compression):
|
2166 |
+
df = DataFrame(
|
2167 |
+
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
|
2168 |
+
index=["A", "B"],
|
2169 |
+
columns=["X", "Y", "Z"],
|
2170 |
+
)
|
2171 |
+
df.index.name = "index"
|
2172 |
+
|
2173 |
+
with tm.ensure_clean() as path:
|
2174 |
+
df.to_stata(path, compression=compression)
|
2175 |
+
reread = read_stata(path, compression=compression, index_col="index")
|
2176 |
+
tm.assert_frame_equal(df, reread)
|
2177 |
+
|
2178 |
+
# explicitly ensure file was compressed.
|
2179 |
+
with tm.decompress_file(path, compression) as fh:
|
2180 |
+
contents = io.BytesIO(fh.read())
|
2181 |
+
reread = read_stata(contents, index_col="index")
|
2182 |
+
tm.assert_frame_equal(df, reread)
|
2183 |
+
|
2184 |
+
|
2185 |
+
@pytest.mark.parametrize("to_infer", [True, False])
|
2186 |
+
@pytest.mark.parametrize("read_infer", [True, False])
|
2187 |
+
def test_stata_compression(
|
2188 |
+
compression_only, read_infer, to_infer, compression_to_extension
|
2189 |
+
):
|
2190 |
+
compression = compression_only
|
2191 |
+
|
2192 |
+
ext = compression_to_extension[compression]
|
2193 |
+
filename = f"test.{ext}"
|
2194 |
+
|
2195 |
+
df = DataFrame(
|
2196 |
+
[[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
|
2197 |
+
index=["A", "B"],
|
2198 |
+
columns=["X", "Y", "Z"],
|
2199 |
+
)
|
2200 |
+
df.index.name = "index"
|
2201 |
+
|
2202 |
+
to_compression = "infer" if to_infer else compression
|
2203 |
+
read_compression = "infer" if read_infer else compression
|
2204 |
+
|
2205 |
+
with tm.ensure_clean(filename) as path:
|
2206 |
+
df.to_stata(path, compression=to_compression)
|
2207 |
+
result = read_stata(path, compression=read_compression, index_col="index")
|
2208 |
+
tm.assert_frame_equal(result, df)
|
2209 |
+
|
2210 |
+
|
2211 |
+
def test_non_categorical_value_labels():
|
2212 |
+
data = DataFrame(
|
2213 |
+
{
|
2214 |
+
"fully_labelled": [1, 2, 3, 3, 1],
|
2215 |
+
"partially_labelled": [1.0, 2.0, np.nan, 9.0, np.nan],
|
2216 |
+
"Y": [7, 7, 9, 8, 10],
|
2217 |
+
"Z": pd.Categorical(["j", "k", "l", "k", "j"]),
|
2218 |
+
}
|
2219 |
+
)
|
2220 |
+
|
2221 |
+
with tm.ensure_clean() as path:
|
2222 |
+
value_labels = {
|
2223 |
+
"fully_labelled": {1: "one", 2: "two", 3: "three"},
|
2224 |
+
"partially_labelled": {1.0: "one", 2.0: "two"},
|
2225 |
+
}
|
2226 |
+
expected = {**value_labels, "Z": {0: "j", 1: "k", 2: "l"}}
|
2227 |
+
|
2228 |
+
writer = StataWriter(path, data, value_labels=value_labels)
|
2229 |
+
writer.write_file()
|
2230 |
+
|
2231 |
+
with StataReader(path) as reader:
|
2232 |
+
reader_value_labels = reader.value_labels()
|
2233 |
+
assert reader_value_labels == expected
|
2234 |
+
|
2235 |
+
msg = "Can't create value labels for notY, it wasn't found in the dataset."
|
2236 |
+
with pytest.raises(KeyError, match=msg):
|
2237 |
+
value_labels = {"notY": {7: "label1", 8: "label2"}}
|
2238 |
+
StataWriter(path, data, value_labels=value_labels)
|
2239 |
+
|
2240 |
+
msg = (
|
2241 |
+
"Can't create value labels for Z, value labels "
|
2242 |
+
"can only be applied to numeric columns."
|
2243 |
+
)
|
2244 |
+
with pytest.raises(ValueError, match=msg):
|
2245 |
+
value_labels = {"Z": {1: "a", 2: "k", 3: "j", 4: "i"}}
|
2246 |
+
StataWriter(path, data, value_labels=value_labels)
|
2247 |
+
|
2248 |
+
|
2249 |
+
def test_non_categorical_value_label_name_conversion():
|
2250 |
+
# Check conversion of invalid variable names
|
2251 |
+
data = DataFrame(
|
2252 |
+
{
|
2253 |
+
"invalid~!": [1, 1, 2, 3, 5, 8], # Only alphanumeric and _
|
2254 |
+
"6_invalid": [1, 1, 2, 3, 5, 8], # Must start with letter or _
|
2255 |
+
"invalid_name_longer_than_32_characters": [8, 8, 9, 9, 8, 8], # Too long
|
2256 |
+
"aggregate": [2, 5, 5, 6, 6, 9], # Reserved words
|
2257 |
+
(1, 2): [1, 2, 3, 4, 5, 6], # Hashable non-string
|
2258 |
+
}
|
2259 |
+
)
|
2260 |
+
|
2261 |
+
value_labels = {
|
2262 |
+
"invalid~!": {1: "label1", 2: "label2"},
|
2263 |
+
"6_invalid": {1: "label1", 2: "label2"},
|
2264 |
+
"invalid_name_longer_than_32_characters": {8: "eight", 9: "nine"},
|
2265 |
+
"aggregate": {5: "five"},
|
2266 |
+
(1, 2): {3: "three"},
|
2267 |
+
}
|
2268 |
+
|
2269 |
+
expected = {
|
2270 |
+
"invalid__": {1: "label1", 2: "label2"},
|
2271 |
+
"_6_invalid": {1: "label1", 2: "label2"},
|
2272 |
+
"invalid_name_longer_than_32_char": {8: "eight", 9: "nine"},
|
2273 |
+
"_aggregate": {5: "five"},
|
2274 |
+
"_1__2_": {3: "three"},
|
2275 |
+
}
|
2276 |
+
|
2277 |
+
with tm.ensure_clean() as path:
|
2278 |
+
with tm.assert_produces_warning(InvalidColumnName):
|
2279 |
+
data.to_stata(path, value_labels=value_labels)
|
2280 |
+
|
2281 |
+
with StataReader(path) as reader:
|
2282 |
+
reader_value_labels = reader.value_labels()
|
2283 |
+
assert reader_value_labels == expected
|
2284 |
+
|
2285 |
+
|
2286 |
+
def test_non_categorical_value_label_convert_categoricals_error():
|
2287 |
+
# Mapping more than one value to the same label is valid for Stata
|
2288 |
+
# labels, but can't be read with convert_categoricals=True
|
2289 |
+
value_labels = {
|
2290 |
+
"repeated_labels": {10: "Ten", 20: "More than ten", 40: "More than ten"}
|
2291 |
+
}
|
2292 |
+
|
2293 |
+
data = DataFrame(
|
2294 |
+
{
|
2295 |
+
"repeated_labels": [10, 10, 20, 20, 40, 40],
|
2296 |
+
}
|
2297 |
+
)
|
2298 |
+
|
2299 |
+
with tm.ensure_clean() as path:
|
2300 |
+
data.to_stata(path, value_labels=value_labels)
|
2301 |
+
|
2302 |
+
with StataReader(path, convert_categoricals=False) as reader:
|
2303 |
+
reader_value_labels = reader.value_labels()
|
2304 |
+
assert reader_value_labels == value_labels
|
2305 |
+
|
2306 |
+
col = "repeated_labels"
|
2307 |
+
repeats = "-" * 80 + "\n" + "\n".join(["More than ten"])
|
2308 |
+
|
2309 |
+
msg = f"""
|
2310 |
+
Value labels for column {col} are not unique. These cannot be converted to
|
2311 |
+
pandas categoricals.
|
2312 |
+
|
2313 |
+
Either read the file with `convert_categoricals` set to False or use the
|
2314 |
+
low level interface in `StataReader` to separately read the values and the
|
2315 |
+
value_labels.
|
2316 |
+
|
2317 |
+
The repeated labels are:
|
2318 |
+
{repeats}
|
2319 |
+
"""
|
2320 |
+
with pytest.raises(ValueError, match=msg):
|
2321 |
+
read_stata(path, convert_categoricals=True)
|
2322 |
+
|
2323 |
+
|
2324 |
+
@pytest.mark.parametrize("version", [114, 117, 118, 119, None])
|
2325 |
+
@pytest.mark.parametrize(
|
2326 |
+
"dtype",
|
2327 |
+
[
|
2328 |
+
pd.BooleanDtype,
|
2329 |
+
pd.Int8Dtype,
|
2330 |
+
pd.Int16Dtype,
|
2331 |
+
pd.Int32Dtype,
|
2332 |
+
pd.Int64Dtype,
|
2333 |
+
pd.UInt8Dtype,
|
2334 |
+
pd.UInt16Dtype,
|
2335 |
+
pd.UInt32Dtype,
|
2336 |
+
pd.UInt64Dtype,
|
2337 |
+
],
|
2338 |
+
)
|
2339 |
+
def test_nullable_support(dtype, version):
|
2340 |
+
df = DataFrame(
|
2341 |
+
{
|
2342 |
+
"a": Series([1.0, 2.0, 3.0]),
|
2343 |
+
"b": Series([1, pd.NA, pd.NA], dtype=dtype.name),
|
2344 |
+
"c": Series(["a", "b", None]),
|
2345 |
+
}
|
2346 |
+
)
|
2347 |
+
dtype_name = df.b.dtype.numpy_dtype.name
|
2348 |
+
# Only use supported names: no uint, bool or int64
|
2349 |
+
dtype_name = dtype_name.replace("u", "")
|
2350 |
+
if dtype_name == "int64":
|
2351 |
+
dtype_name = "int32"
|
2352 |
+
elif dtype_name == "bool":
|
2353 |
+
dtype_name = "int8"
|
2354 |
+
value = StataMissingValue.BASE_MISSING_VALUES[dtype_name]
|
2355 |
+
smv = StataMissingValue(value)
|
2356 |
+
expected_b = Series([1, smv, smv], dtype=object, name="b")
|
2357 |
+
expected_c = Series(["a", "b", ""], name="c")
|
2358 |
+
with tm.ensure_clean() as path:
|
2359 |
+
df.to_stata(path, write_index=False, version=version)
|
2360 |
+
reread = read_stata(path, convert_missing=True)
|
2361 |
+
tm.assert_series_equal(df.a, reread.a)
|
2362 |
+
tm.assert_series_equal(reread.b, expected_b)
|
2363 |
+
tm.assert_series_equal(reread.c, expected_c)
|
2364 |
+
|
2365 |
+
|
2366 |
+
def test_empty_frame():
|
2367 |
+
# GH 46240
|
2368 |
+
# create an empty DataFrame with int64 and float64 dtypes
|
2369 |
+
df = DataFrame(data={"a": range(3), "b": [1.0, 2.0, 3.0]}).head(0)
|
2370 |
+
with tm.ensure_clean() as path:
|
2371 |
+
df.to_stata(path, write_index=False, version=117)
|
2372 |
+
# Read entire dataframe
|
2373 |
+
df2 = read_stata(path)
|
2374 |
+
assert "b" in df2
|
2375 |
+
# Dtypes don't match since no support for int32
|
2376 |
+
dtypes = Series({"a": np.dtype("int32"), "b": np.dtype("float64")})
|
2377 |
+
tm.assert_series_equal(df2.dtypes, dtypes)
|
2378 |
+
# read one column of empty .dta file
|
2379 |
+
df3 = read_stata(path, columns=["a"])
|
2380 |
+
assert "b" not in df3
|
2381 |
+
tm.assert_series_equal(df3.dtypes, dtypes.loc[["a"]])
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py
ADDED
File without changes

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (194 Bytes)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc
ADDED
Binary file (14.9 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc
ADDED
Binary file (1.33 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc
ADDED
Binary file (3.52 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_boxplot_method.cpython-310.pyc
ADDED
Binary file (27.1 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc
ADDED
Binary file (2.37 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc
ADDED
Binary file (13.3 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc
ADDED
Binary file (58.7 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc
ADDED
Binary file (6.95 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_hist_method.cpython-310.pyc
ADDED
Binary file (32.8 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_misc.cpython-310.pyc
ADDED
Binary file (23.8 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_series.cpython-310.pyc
ADDED
Binary file (36.9 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_style.cpython-310.pyc
ADDED
Binary file (4.66 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/common.py
ADDED
@@ -0,0 +1,563 @@
+"""
+Module consolidating common testing functions for checking plotting.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas.core.dtypes.api import is_list_like
+
+import pandas as pd
+from pandas import Series
+import pandas._testing as tm
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+
+    from matplotlib.axes import Axes
+
+
+def _check_legend_labels(axes, labels=None, visible=True):
+    """
+    Check each axes has expected legend labels
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    labels : list-like
+        expected legend labels
+    visible : bool
+        expected legend visibility. labels are checked only when visible is
+        True
+    """
+    if visible and (labels is None):
+        raise ValueError("labels must be specified when visible is True")
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        if visible:
+            assert ax.get_legend() is not None
+            _check_text_labels(ax.get_legend().get_texts(), labels)
+        else:
+            assert ax.get_legend() is None
+
+
+def _check_legend_marker(ax, expected_markers=None, visible=True):
+    """
+    Check ax has expected legend markers
+
+    Parameters
+    ----------
+    ax : matplotlib Axes object
+    expected_markers : list-like
+        expected legend markers
+    visible : bool
+        expected legend visibility. labels are checked only when visible is
+        True
+    """
+    if visible and (expected_markers is None):
+        raise ValueError("Markers must be specified when visible is True")
+    if visible:
+        handles, _ = ax.get_legend_handles_labels()
+        markers = [handle.get_marker() for handle in handles]
+        assert markers == expected_markers
+    else:
+        assert ax.get_legend() is None
+
+
+def _check_data(xp, rs):
+    """
+    Check each axes has identical lines
+
+    Parameters
+    ----------
+    xp : matplotlib Axes object
+    rs : matplotlib Axes object
+    """
+    import matplotlib.pyplot as plt
+
+    xp_lines = xp.get_lines()
+    rs_lines = rs.get_lines()
+
+    assert len(xp_lines) == len(rs_lines)
+    for xpl, rsl in zip(xp_lines, rs_lines):
+        xpdata = xpl.get_xydata()
+        rsdata = rsl.get_xydata()
+        tm.assert_almost_equal(xpdata, rsdata)
+
+    plt.close("all")
+
+
+def _check_visible(collections, visible=True):
+    """
+    Check each artist is visible or not
+
+    Parameters
+    ----------
+    collections : matplotlib Artist or its list-like
+        target Artist or its list or collection
+    visible : bool
+        expected visibility
+    """
+    from matplotlib.collections import Collection
+
+    if not isinstance(collections, Collection) and not is_list_like(collections):
+        collections = [collections]
+
+    for patch in collections:
+        assert patch.get_visible() == visible
+
+
+def _check_patches_all_filled(axes: Axes | Sequence[Axes], filled: bool = True) -> None:
+    """
+    Check for each artist whether it is filled or not
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    filled : bool
+        expected filling
+    """
+
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        for patch in ax.patches:
+            assert patch.fill == filled
+
+
+def _get_colors_mapped(series, colors):
+    unique = series.unique()
+    # unique and colors length can be differed
+    # depending on slice value
+    mapped = dict(zip(unique, colors))
+    return [mapped[v] for v in series.values]
+
+
+def _check_colors(collections, linecolors=None, facecolors=None, mapping=None):
+    """
+    Check each artist has expected line colors and face colors
+
+    Parameters
+    ----------
+    collections : list-like
+        list or collection of target artist
+    linecolors : list-like which has the same length as collections
+        list of expected line colors
+    facecolors : list-like which has the same length as collections
+        list of expected face colors
+    mapping : Series
+        Series used for color grouping key
+        used for andrew_curves, parallel_coordinates, radviz test
+    """
+    from matplotlib import colors
+    from matplotlib.collections import (
+        Collection,
+        LineCollection,
+        PolyCollection,
+    )
+    from matplotlib.lines import Line2D
+
+    conv = colors.ColorConverter
+    if linecolors is not None:
+        if mapping is not None:
+            linecolors = _get_colors_mapped(mapping, linecolors)
+            linecolors = linecolors[: len(collections)]
+
+        assert len(collections) == len(linecolors)
+        for patch, color in zip(collections, linecolors):
+            if isinstance(patch, Line2D):
+                result = patch.get_color()
+                # Line2D may contains string color expression
+                result = conv.to_rgba(result)
+            elif isinstance(patch, (PolyCollection, LineCollection)):
+                result = tuple(patch.get_edgecolor()[0])
+            else:
+                result = patch.get_edgecolor()
+
+            expected = conv.to_rgba(color)
+            assert result == expected
+
+    if facecolors is not None:
+        if mapping is not None:
+            facecolors = _get_colors_mapped(mapping, facecolors)
+            facecolors = facecolors[: len(collections)]
+
+        assert len(collections) == len(facecolors)
+        for patch, color in zip(collections, facecolors):
+            if isinstance(patch, Collection):
+                # returned as list of np.array
+                result = patch.get_facecolor()[0]
+            else:
+                result = patch.get_facecolor()
+
+            if isinstance(result, np.ndarray):
+                result = tuple(result)
+
+            expected = conv.to_rgba(color)
+            assert result == expected
+
+
+def _check_text_labels(texts, expected):
+    """
+    Check each text has expected labels
+
+    Parameters
+    ----------
+    texts : matplotlib Text object, or its list-like
+        target text, or its list
+    expected : str or list-like which has the same length as texts
+        expected text label, or its list
+    """
+    if not is_list_like(texts):
+        assert texts.get_text() == expected
+    else:
+        labels = [t.get_text() for t in texts]
+        assert len(labels) == len(expected)
+        for label, e in zip(labels, expected):
+            assert label == e
+
+
+def _check_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None):
+    """
+    Check each axes has expected tick properties
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    xlabelsize : number
+        expected xticks font size
+    xrot : number
+        expected xticks rotation
+    ylabelsize : number
+        expected yticks font size
+    yrot : number
+        expected yticks rotation
+    """
+    from matplotlib.ticker import NullFormatter
+
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        if xlabelsize is not None or xrot is not None:
+            if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
+                # If minor ticks has NullFormatter, rot / fontsize are not
+                # retained
+                labels = ax.get_xticklabels()
+            else:
+                labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True)
+
+            for label in labels:
+                if xlabelsize is not None:
+                    tm.assert_almost_equal(label.get_fontsize(), xlabelsize)
+                if xrot is not None:
+                    tm.assert_almost_equal(label.get_rotation(), xrot)
+
+        if ylabelsize is not None or yrot is not None:
+            if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
+                labels = ax.get_yticklabels()
+            else:
+                labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True)
+
+            for label in labels:
+                if ylabelsize is not None:
+                    tm.assert_almost_equal(label.get_fontsize(), ylabelsize)
+                if yrot is not None:
+                    tm.assert_almost_equal(label.get_rotation(), yrot)
+
+
+def _check_ax_scales(axes, xaxis="linear", yaxis="linear"):
+    """
+    Check each axes has expected scales
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    xaxis : {'linear', 'log'}
+        expected xaxis scale
+    yaxis : {'linear', 'log'}
+        expected yaxis scale
+    """
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        assert ax.xaxis.get_scale() == xaxis
+        assert ax.yaxis.get_scale() == yaxis
+
+
+def _check_axes_shape(axes, axes_num=None, layout=None, figsize=None):
+    """
+    Check expected number of axes is drawn in expected layout
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    axes_num : number
+        expected number of axes. Unnecessary axes should be set to
+        invisible.
+    layout : tuple
+        expected layout, (expected number of rows , columns)
+    figsize : tuple
+        expected figsize. default is matplotlib default
+    """
+    from pandas.plotting._matplotlib.tools import flatten_axes
+
+    if figsize is None:
+        figsize = (6.4, 4.8)
+    visible_axes = _flatten_visible(axes)
+
+    if axes_num is not None:
+        assert len(visible_axes) == axes_num
+        for ax in visible_axes:
+            # check something drawn on visible axes
+            assert len(ax.get_children()) > 0
+
+    if layout is not None:
+        x_set = set()
+        y_set = set()
+        for ax in flatten_axes(axes):
+            # check axes coordinates to estimate layout
+            points = ax.get_position().get_points()
+            x_set.add(points[0][0])
+            y_set.add(points[0][1])
+        result = (len(y_set), len(x_set))
+        assert result == layout
+
+    tm.assert_numpy_array_equal(
+        visible_axes[0].figure.get_size_inches(),
+        np.array(figsize, dtype=np.float64),
+    )
+
+
+def _flatten_visible(axes: Axes | Sequence[Axes]) -> Sequence[Axes]:
+    """
+    Flatten axes, and filter only visible
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+
+    """
+    from pandas.plotting._matplotlib.tools import flatten_axes
+
+    axes_ndarray = flatten_axes(axes)
+    axes = [ax for ax in axes_ndarray if ax.get_visible()]
+    return axes
+
+
+def _check_has_errorbars(axes, xerr=0, yerr=0):
+    """
+    Check axes has expected number of errorbars
+
+    Parameters
+    ----------
+    axes : matplotlib Axes object, or its list-like
+    xerr : number
+        expected number of x errorbar
+    yerr : number
+        expected number of y errorbar
+    """
+    axes = _flatten_visible(axes)
+    for ax in axes:
+        containers = ax.containers
+        xerr_count = 0
+        yerr_count = 0
+        for c in containers:
+            has_xerr = getattr(c, "has_xerr", False)
+            has_yerr = getattr(c, "has_yerr", False)
+            if has_xerr:
+                xerr_count += 1
+            if has_yerr:
+                yerr_count += 1
+        assert xerr == xerr_count
+        assert yerr == yerr_count
+
+
+def _check_box_return_type(
+    returned, return_type, expected_keys=None, check_ax_title=True
+):
+    """
+    Check box returned type is correct
+
+    Parameters
+    ----------
+    returned : object to be tested, returned from boxplot
+    return_type : str
+        return_type passed to boxplot
+    expected_keys : list-like, optional
+        group labels in subplot case. If not passed,
+        the function checks assuming boxplot uses single ax
+    check_ax_title : bool
+        Whether to check the ax.title is the same as expected_key
+        Intended to be checked by calling from ``boxplot``.
+        Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.
+    """
+    from matplotlib.axes import Axes
+
+    types = {"dict": dict, "axes": Axes, "both": tuple}
+    if expected_keys is None:
+        # should be fixed when the returning default is changed
+        if return_type is None:
+            return_type = "dict"
+
+        assert isinstance(returned, types[return_type])
+        if return_type == "both":
+            assert isinstance(returned.ax, Axes)
+            assert isinstance(returned.lines, dict)
+    else:
+        # should be fixed when the returning default is changed
+        if return_type is None:
+            for r in _flatten_visible(returned):
+                assert isinstance(r, Axes)
+            return
+
+        assert isinstance(returned, Series)
+
+        assert sorted(returned.keys()) == sorted(expected_keys)
+        for key, value in returned.items():
+            assert isinstance(value, types[return_type])
+            # check returned dict has correct mapping
+            if return_type == "axes":
+                if check_ax_title:
+                    assert value.get_title() == key
+            elif return_type == "both":
+                if check_ax_title:
+                    assert value.ax.get_title() == key
+                assert isinstance(value.ax, Axes)
+                assert isinstance(value.lines, dict)
+            elif return_type == "dict":
+                line = value["medians"][0]
+                axes = line.axes
+                if check_ax_title:
+                    assert axes.get_title() == key
+            else:
+                raise AssertionError
+
+
+def _check_grid_settings(obj, kinds, kws={}):
+    # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
+
+    import matplotlib as mpl
+
+    def is_grid_on():
+        xticks = mpl.pyplot.gca().xaxis.get_major_ticks()
+        yticks = mpl.pyplot.gca().yaxis.get_major_ticks()
+        xoff = all(not g.gridline.get_visible() for g in xticks)
+        yoff = all(not g.gridline.get_visible() for g in yticks)
+
+        return not (xoff and yoff)
+
+    spndx = 1
+    for kind in kinds:
+        mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
+        spndx += 1
+        mpl.rc("axes", grid=False)
+        obj.plot(kind=kind, **kws)
+        assert not is_grid_on()
+        mpl.pyplot.clf()
+
+        mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
+        spndx += 1
+        mpl.rc("axes", grid=True)
+        obj.plot(kind=kind, grid=False, **kws)
+        assert not is_grid_on()
+        mpl.pyplot.clf()
+
+        if kind not in ["pie", "hexbin", "scatter"]:
+            mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
+            spndx += 1
+            mpl.rc("axes", grid=True)
+            obj.plot(kind=kind, **kws)
+            assert is_grid_on()
+            mpl.pyplot.clf()
+
+            mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
+            spndx += 1
+            mpl.rc("axes", grid=False)
+            obj.plot(kind=kind, grid=True, **kws)
+            assert is_grid_on()
+            mpl.pyplot.clf()
+
+
+def _unpack_cycler(rcParams, field="color"):
+    """
+    Auxiliary function for correctly unpacking cycler after MPL >= 1.5
+    """
+    return [v[field] for v in rcParams["axes.prop_cycle"]]
+
+
+def get_x_axis(ax):
+    return ax._shared_axes["x"]
+
+
+def get_y_axis(ax):
+    return ax._shared_axes["y"]
+
+
+def _check_plot_works(f, default_axes=False, **kwargs):
+    """
+    Create plot and ensure that plot return object is valid.
+
+    Parameters
+    ----------
+    f : func
+        Plotting function.
+    default_axes : bool, optional
+        If False (default):
+            - If `ax` not in `kwargs`, then create subplot(211) and plot there
+            - Create new subplot(212) and plot there as well
+            - Mind special corner case for bootstrap_plot (see `_gen_two_subplots`)
+        If True:
+            - Simply run plotting function with kwargs provided
+            - All required axes instances will be created automatically
+            - It is recommended to use it when the plotting function
+            creates multiple axes itself. It helps avoid warnings like
+            'UserWarning: To output multiple subplots,
+            the figure containing the passed axes is being cleared'
+    **kwargs
+        Keyword arguments passed to the plotting function.
+
+    Returns
+    -------
+    Plot object returned by the last plotting.
+    """
+    import matplotlib.pyplot as plt
+
+    if default_axes:
+        gen_plots = _gen_default_plot
+    else:
+        gen_plots = _gen_two_subplots
+
+    ret = None
+    try:
+        fig = kwargs.get("figure", plt.gcf())
+        plt.clf()
+
+        for ret in gen_plots(f, fig, **kwargs):
+            tm.assert_is_valid_plot_return_object(ret)
+
+    finally:
+        plt.close(fig)
+
+    return ret
+
+
+def _gen_default_plot(f, fig, **kwargs):
+    """
+    Create plot in a default way.
+    """
+    yield f(**kwargs)
+
+
+def _gen_two_subplots(f, fig, **kwargs):
+    """
+    Create plot on two subplots forcefully created.
+    """
+    if "ax" not in kwargs:
+        fig.add_subplot(211)
+    yield f(**kwargs)
+
+    if f is pd.plotting.bootstrap_plot:
+        assert "ax" not in kwargs
+    else:
+        kwargs["ax"] = fig.add_subplot(212)
+        yield f(**kwargs)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py
ADDED
@@ -0,0 +1,56 @@
+import gc
+
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    to_datetime,
+)
+
+
+@pytest.fixture(autouse=True)
+def mpl_cleanup():
+    # matplotlib/testing/decorators.py#L24
+    # 1) Resets units registry
+    # 2) Resets rc_context
+    # 3) Closes all figures
+    mpl = pytest.importorskip("matplotlib")
+    mpl_units = pytest.importorskip("matplotlib.units")
+    plt = pytest.importorskip("matplotlib.pyplot")
+    orig_units_registry = mpl_units.registry.copy()
+    with mpl.rc_context():
+        mpl.use("template")
+        yield
+    mpl_units.registry.clear()
+    mpl_units.registry.update(orig_units_registry)
+    plt.close("all")
+    # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close  # noqa: E501
+    gc.collect(1)
+
+
+@pytest.fixture
+def hist_df():
+    n = 50
+    rng = np.random.default_rng(10)
+    gender = rng.choice(["Male", "Female"], size=n)
+    classroom = rng.choice(["A", "B", "C"], size=n)
+
+    hist_df = DataFrame(
+        {
+            "gender": gender,
+            "classroom": classroom,
+            "height": rng.normal(66, 4, size=n),
+            "weight": rng.normal(161, 32, size=n),
+            "category": rng.integers(4, size=n),
+            "datetime": to_datetime(
+                rng.integers(
+                    812419200000000000,
+                    819331200000000000,
+                    size=n,
+                    dtype=np.int64,
+                )
+            ),
+        }
+    )
+    return hist_df

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py
ADDED
File without changes

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (200 Bytes)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc
ADDED
Binary file (89.5 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc
ADDED
Binary file (31.9 kB)

llmeval-env/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc
ADDED
Binary file (1.97 kB)