Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/conftest.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_c_parser_only.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_comment.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_compression.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_concatenate_chunks.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_converters.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_encoding.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_header.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_mangle_dupes.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_multi_thread.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_na_values.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_parse_dates.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_python_parser_only.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_quoting.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_read_fwf.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_unsupported.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_upcast.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_chunksize.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_common_basic.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_data_list.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_decimal.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_file_buffer_url.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_float.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_index.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_inf.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_ints.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_iterator.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_read_errors.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_verbose.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_chunksize.py +378 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_common_basic.py +979 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_data_list.py +91 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_decimal.py +72 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_file_buffer_url.py +478 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_float.py +79 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_inf.py +78 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_iterator.py +134 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_read_errors.py +320 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_parse_dates.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_usecols_basic.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (187 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/conftest.cpython-310.pyc
ADDED
Binary file (7.82 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_c_parser_only.cpython-310.pyc
ADDED
Binary file (16.7 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_comment.cpython-310.pyc
ADDED
Binary file (5.64 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_compression.cpython-310.pyc
ADDED
Binary file (6.26 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_concatenate_chunks.cpython-310.pyc
ADDED
Binary file (1.36 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_converters.cpython-310.pyc
ADDED
Binary file (6.57 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_dialect.cpython-310.pyc
ADDED
Binary file (4.49 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_encoding.cpython-310.pyc
ADDED
Binary file (9.21 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_header.cpython-310.pyc
ADDED
Binary file (16.3 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_index_col.cpython-310.pyc
ADDED
Binary file (9.54 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_mangle_dupes.cpython-310.pyc
ADDED
Binary file (4.58 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_multi_thread.cpython-310.pyc
ADDED
Binary file (4.24 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_na_values.cpython-310.pyc
ADDED
Binary file (16.2 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_parse_dates.cpython-310.pyc
ADDED
Binary file (44.8 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_python_parser_only.cpython-310.pyc
ADDED
Binary file (15.3 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_quoting.cpython-310.pyc
ADDED
Binary file (4.88 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_read_fwf.cpython-310.pyc
ADDED
Binary file (24.4 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_skiprows.cpython-310.pyc
ADDED
Binary file (8.64 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_textreader.cpython-310.pyc
ADDED
Binary file (10.7 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_unsupported.cpython-310.pyc
ADDED
Binary file (8.59 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__pycache__/test_upcast.cpython-310.pyc
ADDED
Binary file (2.84 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (194 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_chunksize.cpython-310.pyc
ADDED
Binary file (9.67 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_common_basic.cpython-310.pyc
ADDED
Binary file (24.6 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_data_list.cpython-310.pyc
ADDED
Binary file (2.36 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_decimal.cpython-310.pyc
ADDED
Binary file (1.97 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_file_buffer_url.cpython-310.pyc
ADDED
Binary file (12.2 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_float.cpython-310.pyc
ADDED
Binary file (2.58 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_index.cpython-310.pyc
ADDED
Binary file (6.03 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_inf.cpython-310.pyc
ADDED
Binary file (2.13 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_ints.cpython-310.pyc
ADDED
Binary file (5.56 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_iterator.cpython-310.pyc
ADDED
Binary file (3.71 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_read_errors.cpython-310.pyc
ADDED
Binary file (8.61 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/__pycache__/test_verbose.cpython-310.pyc
ADDED
Binary file (2.04 kB)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_chunksize.py
ADDED
@@ -0,0 +1,378 @@
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import StringIO

import numpy as np
import pytest

from pandas._libs import parsers as libparsers
from pandas.errors import DtypeWarning

from pandas import (
    DataFrame,
    concat,
)
import pandas._testing as tm

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)


@pytest.mark.parametrize("index_col", [0, "index"])
def test_read_chunksize_with_index(all_parsers, index_col):
    parser = all_parsers
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""

    expected = DataFrame(
        [
            ["foo", 2, 3, 4, 5],
            ["bar", 7, 8, 9, 10],
            ["baz", 12, 13, 14, 15],
            ["qux", 12, 13, 14, 15],
            ["foo2", 12, 13, 14, 15],
            ["bar2", 12, 13, 14, 15],
        ],
        columns=["index", "A", "B", "C", "D"],
    )
    expected = expected.set_index("index")

    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
                list(reader)
        return

    with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:
        chunks = list(reader)
    tm.assert_frame_equal(chunks[0], expected[:2])
    tm.assert_frame_equal(chunks[1], expected[2:4])
    tm.assert_frame_equal(chunks[2], expected[4:])


@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])
def test_read_chunksize_bad(all_parsers, chunksize):
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    parser = all_parsers
    msg = r"'chunksize' must be an integer >=1"
    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"

    with pytest.raises(ValueError, match=msg):
        with parser.read_csv(StringIO(data), chunksize=chunksize) as _:
            pass


@pytest.mark.parametrize("chunksize", [2, 8])
def test_read_chunksize_and_nrows(all_parsers, chunksize):
    # see gh-15755
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    parser = all_parsers
    kwargs = {"index_col": 0, "nrows": 5}

    if parser.engine == "pyarrow":
        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), **kwargs)
        return

    expected = parser.read_csv(StringIO(data), **kwargs)
    with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:
        tm.assert_frame_equal(concat(reader), expected)


def test_read_chunksize_and_nrows_changing_size(all_parsers):
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    parser = all_parsers
    kwargs = {"index_col": 0, "nrows": 5}

    if parser.engine == "pyarrow":
        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), **kwargs)
        return

    expected = parser.read_csv(StringIO(data), **kwargs)
    with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:
        tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])
        tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])

        with pytest.raises(StopIteration, match=""):
            reader.get_chunk(size=3)


def test_get_chunk_passed_chunksize(all_parsers):
    parser = all_parsers
    data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""

    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            with parser.read_csv(StringIO(data), chunksize=2) as reader:
                reader.get_chunk()
        return

    with parser.read_csv(StringIO(data), chunksize=2) as reader:
        result = reader.get_chunk()

    expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])
def test_read_chunksize_compat(all_parsers, kwargs):
    # see gh-12185
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), **kwargs)

    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
                concat(reader)
        return

    with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:
        via_reader = concat(reader)
    tm.assert_frame_equal(via_reader, result)


def test_read_chunksize_jagged_names(all_parsers):
    # see gh-23509
    parser = all_parsers
    data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])

    expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])

    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            with parser.read_csv(
                StringIO(data), names=range(10), chunksize=4
            ) as reader:
                concat(reader)
        return

    with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader:
        result = concat(reader)
    tm.assert_frame_equal(result, expected)


def test_chunk_begins_with_newline_whitespace(all_parsers):
    # see gh-10022
    parser = all_parsers
    data = "\n hello\nworld\n"

    result = parser.read_csv(StringIO(data), header=None)
    expected = DataFrame([" hello", "world"])
    tm.assert_frame_equal(result, expected)


@pytest.mark.slow
def test_chunks_have_consistent_numerical_type(all_parsers, monkeypatch):
    # mainly an issue with the C parser
    heuristic = 2**3
    parser = all_parsers
    integers = [str(i) for i in range(heuristic - 1)]
    data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)

    # Coercions should work without warnings.
    with monkeypatch.context() as m:
        m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)
        result = parser.read_csv(StringIO(data))

    assert type(result.a[0]) is np.float64
    assert result.a.dtype == float


def test_warn_if_chunks_have_mismatched_type(all_parsers):
    warning_type = None
    parser = all_parsers
    size = 10000

    # see gh-3866: if chunks are different types and can't
    # be coerced using numerical types, then issue warning.
    if parser.engine == "c" and parser.low_memory:
        warning_type = DtypeWarning
        # Use larger size to hit warning path
        size = 499999

    integers = [str(i) for i in range(size)]
    data = "a\n" + "\n".join(integers + ["a", "b"] + integers)

    buf = StringIO(data)

    if parser.engine == "pyarrow":
        df = parser.read_csv(
            buf,
        )
    else:
        df = parser.read_csv_check_warnings(
            warning_type,
            r"Columns \(0\) have mixed types. "
            "Specify dtype option on import or set low_memory=False.",
            buf,
        )

    assert df.a.dtype == object


@pytest.mark.parametrize("iterator", [True, False])
def test_empty_with_nrows_chunksize(all_parsers, iterator):
    # see gh-9535
    parser = all_parsers
    expected = DataFrame(columns=["foo", "bar"])

    nrows = 10
    data = StringIO("foo,bar\n")

    if parser.engine == "pyarrow":
        msg = (
            "The '(nrows|chunksize)' option is not supported with the 'pyarrow' engine"
        )
        with pytest.raises(ValueError, match=msg):
            if iterator:
                with parser.read_csv(data, chunksize=nrows) as reader:
                    next(iter(reader))
            else:
                parser.read_csv(data, nrows=nrows)
        return

    if iterator:
        with parser.read_csv(data, chunksize=nrows) as reader:
            result = next(iter(reader))
    else:
        result = parser.read_csv(data, nrows=nrows)

    tm.assert_frame_equal(result, expected)


def test_read_csv_memory_growth_chunksize(all_parsers):
    # see gh-24805
    #
    # Let's just make sure that we don't crash
    # as we iteratively process all chunks.
    parser = all_parsers

    with tm.ensure_clean() as path:
        with open(path, "w", encoding="utf-8") as f:
            for i in range(1000):
                f.write(str(i) + "\n")

        if parser.engine == "pyarrow":
            msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
            with pytest.raises(ValueError, match=msg):
                with parser.read_csv(path, chunksize=20) as result:
                    for _ in result:
                        pass
            return

        with parser.read_csv(path, chunksize=20) as result:
            for _ in result:
                pass


def test_chunksize_with_usecols_second_block_shorter(all_parsers):
    # GH#21211
    parser = all_parsers
    data = """1,2,3,4
5,6,7,8
9,10,11
"""

    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(
                StringIO(data),
                names=["a", "b"],
                chunksize=2,
                usecols=[0, 1],
                header=None,
            )
        return

    result_chunks = parser.read_csv(
        StringIO(data),
        names=["a", "b"],
        chunksize=2,
        usecols=[0, 1],
        header=None,
    )

    expected_frames = [
        DataFrame({"a": [1, 5], "b": [2, 6]}),
        DataFrame({"a": [9], "b": [10]}, index=[2]),
    ]

    for i, result in enumerate(result_chunks):
        tm.assert_frame_equal(result, expected_frames[i])


def test_chunksize_second_block_shorter(all_parsers):
    # GH#21211
    parser = all_parsers
    data = """a,b,c,d
1,2,3,4
5,6,7,8
9,10,11
"""

    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), chunksize=2)
        return

    result_chunks = parser.read_csv(StringIO(data), chunksize=2)

    expected_frames = [
        DataFrame({"a": [1, 5], "b": [2, 6], "c": [3, 7], "d": [4, 8]}),
        DataFrame({"a": [9], "b": [10], "c": [11], "d": [np.nan]}, index=[2]),
    ]

    for i, result in enumerate(result_chunks):
        tm.assert_frame_equal(result, expected_frames[i])
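These tests all exercise the same pattern: passing chunksize to read_csv returns a TextFileReader that yields DataFrames of at most chunksize rows and works as a context manager, exactly as the with-blocks above use it. A minimal usage sketch (the CSV literal here is illustrative, not taken from the test data):

from io import StringIO

import pandas as pd

data = "a,b\n1,2\n3,4\n5,6\n"

# chunksize=2 returns a TextFileReader rather than a DataFrame; each
# iteration yields up to two rows, and the context manager closes the
# underlying handle when iteration is done.
with pd.read_csv(StringIO(data), chunksize=2) as reader:
    for chunk in reader:
        print(chunk.shape)  # (2, 2) then (1, 2)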
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_common_basic.py
ADDED
@@ -0,0 +1,979 @@
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from inspect import signature
from io import StringIO
import os
from pathlib import Path
import sys

import numpy as np
import pytest

from pandas.errors import (
    EmptyDataError,
    ParserError,
    ParserWarning,
)

from pandas import (
    DataFrame,
    Index,
    Timestamp,
    compat,
)
import pandas._testing as tm

from pandas.io.parsers import TextFileReader
from pandas.io.parsers.c_parser_wrapper import CParserWrapper

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)

xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")


def test_override_set_noconvert_columns():
    # see gh-17351
    #
    # Usecols needs to be sorted in _set_noconvert_columns based
    # on the test_usecols_with_parse_dates test from test_usecols.py
    class MyTextFileReader(TextFileReader):
        def __init__(self) -> None:
            self._currow = 0
            self.squeeze = False

    class MyCParserWrapper(CParserWrapper):
        def _set_noconvert_columns(self):
            if self.usecols_dtype == "integer":
                # self.usecols is a set, which is documented as unordered
                # but in practice, a CPython set of integers is sorted.
                # In other implementations this assumption does not hold.
                # The following code simulates a different order, which
                # before GH 17351 would cause the wrong columns to be
                # converted via the parse_dates parameter
                self.usecols = list(self.usecols)
                self.usecols.reverse()
            return CParserWrapper._set_noconvert_columns(self)

    data = """a,b,c,d,e
0,1,2014-01-01,09:00,4
0,1,2014-01-02,10:00,4"""

    parse_dates = [[1, 2]]
    cols = {
        "a": [0, 0],
        "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
    }
    expected = DataFrame(cols, columns=["c_d", "a"])

    parser = MyTextFileReader()
    parser.options = {
        "usecols": [0, 2, 3],
        "parse_dates": parse_dates,
        "delimiter": ",",
    }
    parser.engine = "c"
    parser._engine = MyCParserWrapper(StringIO(data), **parser.options)

    result = parser.read()
    tm.assert_frame_equal(result, expected)


def test_read_csv_local(all_parsers, csv1):
    prefix = "file:///" if compat.is_platform_windows() else "file://"
    parser = all_parsers

    fname = prefix + str(os.path.abspath(csv1))
    result = parser.read_csv(fname, index_col=0, parse_dates=True)
    # TODO: make unit check more specific
    if parser.engine == "pyarrow":
        result.index = result.index.as_unit("ns")
    expected = DataFrame(
        [
            [0.980269, 3.685731, -0.364216805298, -1.159738],
            [1.047916, -0.041232, -0.16181208307, 0.212549],
            [0.498581, 0.731168, -0.537677223318, 1.346270],
            [1.120202, 1.567621, 0.00364077397681, 0.675253],
            [-0.487094, 0.571455, -1.6116394093, 0.103469],
            [0.836649, 0.246462, 0.588542635376, 1.062782],
            [-0.157161, 1.340307, 1.1957779562, -1.097007],
        ],
        columns=["A", "B", "C", "D"],
        index=Index(
            [
                datetime(2000, 1, 3),
                datetime(2000, 1, 4),
                datetime(2000, 1, 5),
                datetime(2000, 1, 6),
                datetime(2000, 1, 7),
                datetime(2000, 1, 10),
                datetime(2000, 1, 11),
            ],
            name="index",
        ),
    )
    tm.assert_frame_equal(result, expected)


def test_1000_sep(all_parsers):
    parser = all_parsers
    data = """A|B|C
1|2,334|5
10|13|10.
"""
    expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})

    if parser.engine == "pyarrow":
        msg = "The 'thousands' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), sep="|", thousands=",")
        return

    result = parser.read_csv(StringIO(data), sep="|", thousands=",")
    tm.assert_frame_equal(result, expected)


@xfail_pyarrow  # ValueError: Found non-unique column index
def test_unnamed_columns(all_parsers):
    data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
    parser = all_parsers
    expected = DataFrame(
        [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],
        dtype=np.int64,
        columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],
    )
    result = parser.read_csv(StringIO(data))
    tm.assert_frame_equal(result, expected)


def test_csv_mixed_type(all_parsers):
    data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
    parser = all_parsers
    expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})
    result = parser.read_csv(StringIO(data))
    tm.assert_frame_equal(result, expected)


def test_read_csv_low_memory_no_rows_with_index(all_parsers):
    # see gh-21141
    parser = all_parsers

    if not parser.low_memory:
        pytest.skip("This is a low-memory specific test")

    data = """A,B,C
1,1,1,2
2,2,3,4
3,3,4,5
"""

    if parser.engine == "pyarrow":
        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
        return

    result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)
    expected = DataFrame(columns=["A", "B", "C"])
    tm.assert_frame_equal(result, expected)


def test_read_csv_dataframe(all_parsers, csv1):
    parser = all_parsers
    result = parser.read_csv(csv1, index_col=0, parse_dates=True)
    # TODO: make unit check more specific
    if parser.engine == "pyarrow":
        result.index = result.index.as_unit("ns")
    expected = DataFrame(
        [
            [0.980269, 3.685731, -0.364216805298, -1.159738],
            [1.047916, -0.041232, -0.16181208307, 0.212549],
            [0.498581, 0.731168, -0.537677223318, 1.346270],
            [1.120202, 1.567621, 0.00364077397681, 0.675253],
            [-0.487094, 0.571455, -1.6116394093, 0.103469],
            [0.836649, 0.246462, 0.588542635376, 1.062782],
            [-0.157161, 1.340307, 1.1957779562, -1.097007],
        ],
        columns=["A", "B", "C", "D"],
        index=Index(
            [
                datetime(2000, 1, 3),
                datetime(2000, 1, 4),
                datetime(2000, 1, 5),
                datetime(2000, 1, 6),
                datetime(2000, 1, 7),
                datetime(2000, 1, 10),
                datetime(2000, 1, 11),
            ],
            name="index",
        ),
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("nrows", [3, 3.0])
def test_read_nrows(all_parsers, nrows):
    # see gh-10476
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    expected = DataFrame(
        [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],
        columns=["index", "A", "B", "C", "D"],
    )
    parser = all_parsers

    if parser.engine == "pyarrow":
        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), nrows=nrows)
        return

    result = parser.read_csv(StringIO(data), nrows=nrows)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("nrows", [1.2, "foo", -1])
def test_read_nrows_bad(all_parsers, nrows):
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    msg = r"'nrows' must be an integer >=0"
    parser = all_parsers
    if parser.engine == "pyarrow":
        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"

    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), nrows=nrows)


def test_nrows_skipfooter_errors(all_parsers):
    msg = "'skipfooter' not supported with 'nrows'"
    data = "a\n1\n2\n3\n4\n5\n6"
    parser = all_parsers

    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), skipfooter=1, nrows=5)


@skip_pyarrow
def test_missing_trailing_delimiters(all_parsers):
    parser = all_parsers
    data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""

    result = parser.read_csv(StringIO(data))
    expected = DataFrame(
        [[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],
        columns=["A", "B", "C", "D"],
    )
    tm.assert_frame_equal(result, expected)


def test_skip_initial_space(all_parsers):
    data = (
        '"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
        "1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "
        "314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "
        "70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "
        "0.212036, 14.7674, 41.605, -9999.0, -9999.0, "
        "-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"
    )
    parser = all_parsers

    if parser.engine == "pyarrow":
        msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(
                StringIO(data),
                names=list(range(33)),
                header=None,
                na_values=["-9999.0"],
                skipinitialspace=True,
            )
        return

    result = parser.read_csv(
        StringIO(data),
        names=list(range(33)),
        header=None,
        na_values=["-9999.0"],
        skipinitialspace=True,
    )
    expected = DataFrame(
        [
            [
                "09-Apr-2012",
                "01:10:18.300",
                2456026.548822908,
                12849,
                1.00361,
                1.12551,
                330.65659,
                355626618.16711,
                73.48821,
                314.11625,
                1917.09447,
                179.71425,
                80.0,
                240.0,
                -350,
                70.06056,
                344.9837,
                1,
                1,
                -0.689265,
                -0.692787,
                0.212036,
                14.7674,
                41.605,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                np.nan,
                0,
                12,
                128,
            ]
        ]
    )
    tm.assert_frame_equal(result, expected)


@skip_pyarrow
def test_trailing_delimiters(all_parsers):
    # see gh-2442
    data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=False)

    expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})
    tm.assert_frame_equal(result, expected)


def test_escapechar(all_parsers):
    # https://stackoverflow.com/questions/13824840/feature-request-for-
    # pandas-read-csv
    data = '''SEARCH_TERM,ACTUAL_URL
"bra tv board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''

    parser = all_parsers
    result = parser.read_csv(
        StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"
    )

    assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'

    tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))


def test_ignore_leading_whitespace(all_parsers):
    # see gh-3374, gh-6607
    parser = all_parsers
    data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"

    if parser.engine == "pyarrow":
        msg = "the 'pyarrow' engine does not support regex separators"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), sep=r"\s+")
        return
    result = parser.read_csv(StringIO(data), sep=r"\s+")

    expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})
    tm.assert_frame_equal(result, expected)


@skip_pyarrow
@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])
def test_uneven_lines_with_usecols(all_parsers, usecols):
    # see gh-12203
    parser = all_parsers
    data = r"""a,b,c
0,1,2
3,4,5,6,7
8,9,10"""

    if usecols is None:
        # Make sure that an error is still raised
        # when the "usecols" parameter is not provided.
        msg = r"Expected \d+ fields in line \d+, saw \d+"
        with pytest.raises(ParserError, match=msg):
            parser.read_csv(StringIO(data))
    else:
        expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})

        result = parser.read_csv(StringIO(data), usecols=usecols)
        tm.assert_frame_equal(result, expected)


@skip_pyarrow
@pytest.mark.parametrize(
    "data,kwargs,expected",
    [
        # First, check to see that the response of parser when faced with no
        # provided columns raises the correct error, with or without usecols.
        ("", {}, None),
        ("", {"usecols": ["X"]}, None),
        (
            ",,",
            {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
            DataFrame(columns=["X"], index=[0], dtype=np.float64),
        ),
        (
            "",
            {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},
            DataFrame(columns=["X"]),
        ),
    ],
)
def test_read_empty_with_usecols(all_parsers, data, kwargs, expected):
    # see gh-12493
    parser = all_parsers

    if expected is None:
        msg = "No columns to parse from file"
        with pytest.raises(EmptyDataError, match=msg):
            parser.read_csv(StringIO(data), **kwargs)
    else:
        result = parser.read_csv(StringIO(data), **kwargs)
        tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "kwargs,expected",
    [
        # gh-8661, gh-8679: this should ignore six lines, including
        # lines with trailing whitespace and blank lines.
        (
            {
                "header": None,
                "delim_whitespace": True,
                "skiprows": [0, 1, 2, 3, 5, 6],
                "skip_blank_lines": True,
            },
            DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),
        ),
        # gh-8983: test skipping set of rows after a row with trailing spaces.
        (
            {
                "delim_whitespace": True,
                "skiprows": [1, 2, 3, 5, 6],
                "skip_blank_lines": True,
            },
            DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),
        ),
    ],
)
def test_trailing_spaces(all_parsers, kwargs, expected):
    data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"  # noqa: E501
    parser = all_parsers

    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"

    if parser.engine == "pyarrow":
        msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            with tm.assert_produces_warning(
                FutureWarning, match=depr_msg, check_stacklevel=False
            ):
                parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
        return

    with tm.assert_produces_warning(
        FutureWarning, match=depr_msg, check_stacklevel=False
    ):
        result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)
    tm.assert_frame_equal(result, expected)


def test_raise_on_sep_with_delim_whitespace(all_parsers):
    # see gh-6607
    data = "a b c\n1 2 3"
    parser = all_parsers

    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
    with pytest.raises(ValueError, match="you can only specify one"):
        with tm.assert_produces_warning(
            FutureWarning, match=depr_msg, check_stacklevel=False
        ):
            parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)


def test_read_filepath_or_buffer(all_parsers):
    # see gh-43366
    parser = all_parsers

    with pytest.raises(TypeError, match="Expected file path name or file-like"):
        parser.read_csv(filepath_or_buffer=b"input")


@pytest.mark.parametrize("delim_whitespace", [True, False])
def test_single_char_leading_whitespace(all_parsers, delim_whitespace):
    # see gh-9710
    parser = all_parsers
    data = """\
MyColumn
a
b
a
b\n"""

    expected = DataFrame({"MyColumn": list("abab")})
    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"

    if parser.engine == "pyarrow":
        msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            with tm.assert_produces_warning(
                FutureWarning, match=depr_msg, check_stacklevel=False
            ):
                parser.read_csv(
                    StringIO(data),
                    skipinitialspace=True,
                    delim_whitespace=delim_whitespace,
                )
        return

    with tm.assert_produces_warning(
        FutureWarning, match=depr_msg, check_stacklevel=False
    ):
        result = parser.read_csv(
            StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace
        )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "sep,skip_blank_lines,exp_data",
    [
        (",", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
        (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),
        (
            ",",
            False,
            [
                [1.0, 2.0, 4.0],
                [np.nan, np.nan, np.nan],
                [np.nan, np.nan, np.nan],
                [5.0, np.nan, 10.0],
                [np.nan, np.nan, np.nan],
                [-70.0, 0.4, 1.0],
            ],
        ),
    ],
)
def test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data, request):
    parser = all_parsers
    data = """\
A,B,C
1,2.,4.


5.,NaN,10.0

-70,.4,1
"""

    if sep == r"\s+":
        data = data.replace(",", " ")

        if parser.engine == "pyarrow":
            msg = "the 'pyarrow' engine does not support regex separators"
            with pytest.raises(ValueError, match=msg):
                parser.read_csv(
                    StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines
                )
            return

    result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)
    expected = DataFrame(exp_data, columns=["A", "B", "C"])
    tm.assert_frame_equal(result, expected)


@skip_pyarrow
def test_whitespace_lines(all_parsers):
    parser = all_parsers
    data = """

\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
    expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])
    result = parser.read_csv(StringIO(data))
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "data,expected",
    [
        (
            """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
""",
            DataFrame(
                [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
                columns=["A", "B", "C", "D"],
                index=["a", "b", "c"],
            ),
        ),
        (
            " a b c\n1 2 3 \n4 5 6\n 7 8 9",
            DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),
        ),
    ],
)
def test_whitespace_regex_separator(all_parsers, data, expected):
    # see gh-6607
    parser = all_parsers
    if parser.engine == "pyarrow":
        msg = "the 'pyarrow' engine does not support regex separators"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), sep=r"\s+")
        return

    result = parser.read_csv(StringIO(data), sep=r"\s+")
    tm.assert_frame_equal(result, expected)


def test_sub_character(all_parsers, csv_dir_path):
    # see gh-16893
    filename = os.path.join(csv_dir_path, "sub_char.csv")
    expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])

    parser = all_parsers
    result = parser.read_csv(filename)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])
def test_filename_with_special_chars(all_parsers, filename):
    # see gh-15086.
    parser = all_parsers
    df = DataFrame({"a": [1, 2, 3]})

    with tm.ensure_clean(filename) as path:
        df.to_csv(path, index=False)

        result = parser.read_csv(path)
        tm.assert_frame_equal(result, df)


def test_read_table_same_signature_as_read_csv(all_parsers):
    # GH-34976
    parser = all_parsers

    table_sign = signature(parser.read_table)
    csv_sign = signature(parser.read_csv)

    assert table_sign.parameters.keys() == csv_sign.parameters.keys()
    assert table_sign.return_annotation == csv_sign.return_annotation

    for key, csv_param in csv_sign.parameters.items():
        table_param = table_sign.parameters[key]
        if key == "sep":
            assert csv_param.default == ","
            assert table_param.default == "\t"
            assert table_param.annotation == csv_param.annotation
            assert table_param.kind == csv_param.kind
            continue

        assert table_param == csv_param


def test_read_table_equivalency_to_read_csv(all_parsers):
    # see gh-21948
    # As of 0.25.0, read_table is undeprecated
    parser = all_parsers
    data = "a\tb\n1\t2\n3\t4"
    expected = parser.read_csv(StringIO(data), sep="\t")
    result = parser.read_table(StringIO(data))
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("read_func", ["read_csv", "read_table"])
def test_read_csv_and_table_sys_setprofile(all_parsers, read_func):
    # GH#41069
    parser = all_parsers
    data = "a b\n0 1"

    sys.setprofile(lambda *a, **k: None)
    result = getattr(parser, read_func)(StringIO(data))
    sys.setprofile(None)

    expected = DataFrame({"a b": ["0 1"]})
    tm.assert_frame_equal(result, expected)


@skip_pyarrow
def test_first_row_bom(all_parsers):
    # see gh-26545
    parser = all_parsers
    data = '''\ufeff"Head1"\t"Head2"\t"Head3"'''

    result = parser.read_csv(StringIO(data), delimiter="\t")
    expected = DataFrame(columns=["Head1", "Head2", "Head3"])
    tm.assert_frame_equal(result, expected)


@skip_pyarrow
def test_first_row_bom_unquoted(all_parsers):
    # see gh-36343
    parser = all_parsers
    data = """\ufeffHead1\tHead2\tHead3"""

    result = parser.read_csv(StringIO(data), delimiter="\t")
    expected = DataFrame(columns=["Head1", "Head2", "Head3"])
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("nrows", range(1, 6))
def test_blank_lines_between_header_and_data_rows(all_parsers, nrows):
    # GH 28071
    ref = DataFrame(
        [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],
        columns=list("ab"),
    )
    csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"
    parser = all_parsers

    if parser.engine == "pyarrow":
        msg = "The 'nrows' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(
                StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False
            )
        return

    df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)
    tm.assert_frame_equal(df, ref[:nrows])


@skip_pyarrow
def test_no_header_two_extra_columns(all_parsers):
    # GH 26218
    column_names = ["one", "two", "three"]
    ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)
    stream = StringIO("foo,bar,baz,bam,blah")
    parser = all_parsers
    df = parser.read_csv_check_warnings(
        ParserWarning,
        "Length of header or names does not match length of data. "
        "This leads to a loss of data with index_col=False.",
        stream,
        header=None,
        names=column_names,
        index_col=False,
    )
    tm.assert_frame_equal(df, ref)


def test_read_csv_names_not_accepting_sets(all_parsers):
    # GH 34946
    data = """\
1,2,3
4,5,6\n"""
    parser = all_parsers
    with pytest.raises(ValueError, match="Names should be an ordered collection."):
        parser.read_csv(StringIO(data), names=set("QAZ"))


def test_read_table_delim_whitespace_default_sep(all_parsers):
    # GH: 35958
    f = StringIO("a b c\n1 -2 -3\n4 5 6")
    parser = all_parsers

    depr_msg = "The 'delim_whitespace' keyword in pd.read_table is deprecated"

    if parser.engine == "pyarrow":
        msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            with tm.assert_produces_warning(
                FutureWarning, match=depr_msg, check_stacklevel=False
            ):
                parser.read_table(f, delim_whitespace=True)
        return
    with tm.assert_produces_warning(
        FutureWarning, match=depr_msg, check_stacklevel=False
    ):
        result = parser.read_table(f, delim_whitespace=True)
    expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("delimiter", [",", "\t"])
def test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):
    # GH: 35958
    f = StringIO("a b c\n1 -2 -3\n4 5 6")
    parser = all_parsers
    msg = (
        "Specified a delimiter with both sep and "
        "delim_whitespace=True; you can only specify one."
    )
    depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
    with tm.assert_produces_warning(
        FutureWarning, match=depr_msg, check_stacklevel=False
    ):
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(f, delim_whitespace=True, sep=delimiter)

        with pytest.raises(ValueError, match=msg):
            parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)


def test_read_csv_delimiter_and_sep_no_default(all_parsers):
    # GH#39823
    f = StringIO("a,b\n1,2")
|
863 |
+
parser = all_parsers
|
864 |
+
msg = "Specified a sep and a delimiter; you can only specify one."
|
865 |
+
with pytest.raises(ValueError, match=msg):
|
866 |
+
parser.read_csv(f, sep=" ", delimiter=".")
|
867 |
+
|
868 |
+
|
869 |
+
@pytest.mark.parametrize("kwargs", [{"delimiter": "\n"}, {"sep": "\n"}])
|
870 |
+
def test_read_csv_line_break_as_separator(kwargs, all_parsers):
|
871 |
+
# GH#43528
|
872 |
+
parser = all_parsers
|
873 |
+
data = """a,b,c
|
874 |
+
1,2,3
|
875 |
+
"""
|
876 |
+
msg = (
|
877 |
+
r"Specified \\n as separator or delimiter. This forces the python engine "
|
878 |
+
r"which does not accept a line terminator. Hence it is not allowed to use "
|
879 |
+
r"the line terminator as separator."
|
880 |
+
)
|
881 |
+
with pytest.raises(ValueError, match=msg):
|
882 |
+
parser.read_csv(StringIO(data), **kwargs)
|
883 |
+
|
884 |
+
|
885 |
+
@pytest.mark.parametrize("delimiter", [",", "\t"])
|
886 |
+
def test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):
|
887 |
+
# GH: 35958
|
888 |
+
f = StringIO("a b c\n1 -2 -3\n4 5 6")
|
889 |
+
parser = all_parsers
|
890 |
+
msg = (
|
891 |
+
"Specified a delimiter with both sep and "
|
892 |
+
"delim_whitespace=True; you can only specify one."
|
893 |
+
)
|
894 |
+
depr_msg = "The 'delim_whitespace' keyword in pd.read_table is deprecated"
|
895 |
+
with tm.assert_produces_warning(
|
896 |
+
FutureWarning, match=depr_msg, check_stacklevel=False
|
897 |
+
):
|
898 |
+
with pytest.raises(ValueError, match=msg):
|
899 |
+
parser.read_table(f, delim_whitespace=True, sep=delimiter)
|
900 |
+
|
901 |
+
with pytest.raises(ValueError, match=msg):
|
902 |
+
parser.read_table(f, delim_whitespace=True, delimiter=delimiter)
|
903 |
+
|
904 |
+
|
905 |
+
@skip_pyarrow
|
906 |
+
def test_dict_keys_as_names(all_parsers):
|
907 |
+
# GH: 36928
|
908 |
+
data = "1,2"
|
909 |
+
|
910 |
+
keys = {"a": int, "b": int}.keys()
|
911 |
+
parser = all_parsers
|
912 |
+
|
913 |
+
result = parser.read_csv(StringIO(data), names=keys)
|
914 |
+
expected = DataFrame({"a": [1], "b": [2]})
|
915 |
+
tm.assert_frame_equal(result, expected)
|
916 |
+
|
917 |
+
|
918 |
+
@xfail_pyarrow # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xed in position 0
|
919 |
+
def test_encoding_surrogatepass(all_parsers):
|
920 |
+
# GH39017
|
921 |
+
parser = all_parsers
|
922 |
+
content = b"\xed\xbd\xbf"
|
923 |
+
decoded = content.decode("utf-8", errors="surrogatepass")
|
924 |
+
expected = DataFrame({decoded: [decoded]}, index=[decoded * 2])
|
925 |
+
expected.index.name = decoded * 2
|
926 |
+
|
927 |
+
with tm.ensure_clean() as path:
|
928 |
+
Path(path).write_bytes(
|
929 |
+
content * 2 + b"," + content + b"\n" + content * 2 + b"," + content
|
930 |
+
)
|
931 |
+
df = parser.read_csv(path, encoding_errors="surrogatepass", index_col=0)
|
932 |
+
tm.assert_frame_equal(df, expected)
|
933 |
+
with pytest.raises(UnicodeDecodeError, match="'utf-8' codec can't decode byte"):
|
934 |
+
parser.read_csv(path)
|
935 |
+
|
936 |
+
|
937 |
+
def test_malformed_second_line(all_parsers):
|
938 |
+
# see GH14782
|
939 |
+
parser = all_parsers
|
940 |
+
data = "\na\nb\n"
|
941 |
+
result = parser.read_csv(StringIO(data), skip_blank_lines=False, header=1)
|
942 |
+
expected = DataFrame({"a": ["b"]})
|
943 |
+
tm.assert_frame_equal(result, expected)
|
944 |
+
|
945 |
+
|
946 |
+
@skip_pyarrow
|
947 |
+
def test_short_single_line(all_parsers):
|
948 |
+
# GH 47566
|
949 |
+
parser = all_parsers
|
950 |
+
columns = ["a", "b", "c"]
|
951 |
+
data = "1,2"
|
952 |
+
result = parser.read_csv(StringIO(data), header=None, names=columns)
|
953 |
+
expected = DataFrame({"a": [1], "b": [2], "c": [np.nan]})
|
954 |
+
tm.assert_frame_equal(result, expected)
|
955 |
+
|
956 |
+
|
957 |
+
@xfail_pyarrow # ValueError: Length mismatch: Expected axis has 2 elements
|
958 |
+
def test_short_multi_line(all_parsers):
|
959 |
+
# GH 47566
|
960 |
+
parser = all_parsers
|
961 |
+
columns = ["a", "b", "c"]
|
962 |
+
data = "1,2\n1,2"
|
963 |
+
result = parser.read_csv(StringIO(data), header=None, names=columns)
|
964 |
+
expected = DataFrame({"a": [1, 1], "b": [2, 2], "c": [np.nan, np.nan]})
|
965 |
+
tm.assert_frame_equal(result, expected)
|
966 |
+
|
967 |
+
|
968 |
+
def test_read_seek(all_parsers):
|
969 |
+
# GH48646
|
970 |
+
parser = all_parsers
|
971 |
+
prefix = "### DATA\n"
|
972 |
+
content = "nkey,value\ntables,rectangular\n"
|
973 |
+
with tm.ensure_clean() as path:
|
974 |
+
Path(path).write_text(prefix + content, encoding="utf-8")
|
975 |
+
with open(path, encoding="utf-8") as file:
|
976 |
+
file.readline()
|
977 |
+
actual = parser.read_csv(file)
|
978 |
+
expected = parser.read_csv(StringIO(content))
|
979 |
+
tm.assert_frame_equal(actual, expected)
|
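For orientation, a minimal usage sketch (not part of the diff) of the read_table/read_csv equivalence that test_read_table_equivalency_to_read_csv above exercises; it assumes only a standard pandas install:

from io import StringIO

import pandas as pd

data = "a\tb\n1\t2\n3\t4"
via_table = pd.read_table(StringIO(data))        # read_table defaults to sep="\t"
via_csv = pd.read_csv(StringIO(data), sep="\t")  # read_csv with an explicit tab
assert via_table.equals(via_csv)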
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_data_list.py
ADDED
@@ -0,0 +1,91 @@
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import csv
from io import StringIO

import pytest

from pandas import DataFrame
import pandas._testing as tm

from pandas.io.parsers import TextParser

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)

xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")


@xfail_pyarrow
def test_read_data_list(all_parsers):
    parser = all_parsers
    kwargs = {"index_col": 0}
    data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"

    data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
    expected = parser.read_csv(StringIO(data), **kwargs)

    with TextParser(data_list, chunksize=2, **kwargs) as parser:
        result = parser.read()

    tm.assert_frame_equal(result, expected)


def test_reader_list(all_parsers):
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    parser = all_parsers
    kwargs = {"index_col": 0}

    lines = list(csv.reader(StringIO(data)))
    with TextParser(lines, chunksize=2, **kwargs) as reader:
        chunks = list(reader)

    expected = parser.read_csv(StringIO(data), **kwargs)

    tm.assert_frame_equal(chunks[0], expected[:2])
    tm.assert_frame_equal(chunks[1], expected[2:4])
    tm.assert_frame_equal(chunks[2], expected[4:])


def test_reader_list_skiprows(all_parsers):
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    parser = all_parsers
    kwargs = {"index_col": 0}

    lines = list(csv.reader(StringIO(data)))
    with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:
        chunks = list(reader)

    expected = parser.read_csv(StringIO(data), **kwargs)

    tm.assert_frame_equal(chunks[0], expected[1:3])


def test_read_csv_parse_simple_list(all_parsers):
    parser = all_parsers
    data = """foo
bar baz
qux foo
foo
bar"""

    result = parser.read_csv(StringIO(data), header=None)
    expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])
    tm.assert_frame_equal(result, expected)
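For orientation, a minimal sketch (not part of the diff) of the TextParser usage the file above tests: feeding pre-split rows and reading them back in chunks, exactly as the tests do. The row values here are illustrative:

from pandas.io.parsers import TextParser

rows = [["index", "A"], ["foo", "1"], ["bar", "2"], ["baz", "3"]]
with TextParser(rows, chunksize=2, index_col=0) as reader:
    chunks = list(reader)  # 3 data rows -> chunks of 2 and 1
assert len(chunks) == 2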
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_decimal.py
ADDED
@@ -0,0 +1,72 @@
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import StringIO

import pytest

from pandas import DataFrame
import pandas._testing as tm

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)


@pytest.mark.parametrize(
    "data,thousands,decimal",
    [
        (
            """A|B|C
1|2,334.01|5
10|13|10.
""",
            ",",
            ".",
        ),
        (
            """A|B|C
1|2.334,01|5
10|13|10,
""",
            ".",
            ",",
        ),
    ],
)
def test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):
    parser = all_parsers
    expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})

    if parser.engine == "pyarrow":
        msg = "The 'thousands' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(
                StringIO(data), sep="|", thousands=thousands, decimal=decimal
            )
        return

    result = parser.read_csv(
        StringIO(data), sep="|", thousands=thousands, decimal=decimal
    )
    tm.assert_frame_equal(result, expected)


def test_euro_decimal_format(all_parsers):
    parser = all_parsers
    data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""

    result = parser.read_csv(StringIO(data), sep=";", decimal=",")
    expected = DataFrame(
        [
            [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],
            [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],
            [3, 878.158, 108013.434, "GHI", "rez", 2.735694704],
        ],
        columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
    )
    tm.assert_frame_equal(result, expected)
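For orientation, a minimal sketch (not part of the diff) of the European number format handled above: "." as the thousands separator and "," as the decimal marker:

from io import StringIO

import pandas as pd

data = "A|B\n1|2.334,01\n10|13"
df = pd.read_csv(StringIO(data), sep="|", thousands=".", decimal=",")
assert df["B"].tolist() == [2334.01, 13.0]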
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_file_buffer_url.py
ADDED
@@ -0,0 +1,478 @@
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import (
    BytesIO,
    StringIO,
)
import os
import platform
from urllib.error import URLError
import uuid

import numpy as np
import pytest

from pandas.errors import (
    EmptyDataError,
    ParserError,
)
import pandas.util._test_decorators as td

from pandas import (
    DataFrame,
    Index,
)
import pandas._testing as tm

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)

xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")


@pytest.mark.network
@pytest.mark.single_cpu
def test_url(all_parsers, csv_dir_path, httpserver):
    parser = all_parsers
    kwargs = {"sep": "\t"}

    local_path = os.path.join(csv_dir_path, "salaries.csv")
    with open(local_path, encoding="utf-8") as f:
        httpserver.serve_content(content=f.read())

    url_result = parser.read_csv(httpserver.url, **kwargs)

    local_result = parser.read_csv(local_path, **kwargs)
    tm.assert_frame_equal(url_result, local_result)


@pytest.mark.slow
def test_local_file(all_parsers, csv_dir_path):
    parser = all_parsers
    kwargs = {"sep": "\t"}

    local_path = os.path.join(csv_dir_path, "salaries.csv")
    local_result = parser.read_csv(local_path, **kwargs)
    url = "file://localhost/" + local_path

    try:
        url_result = parser.read_csv(url, **kwargs)
        tm.assert_frame_equal(url_result, local_result)
    except URLError:
        # Fails on some systems.
        pytest.skip("Failing on: " + " ".join(platform.uname()))


@xfail_pyarrow  # AssertionError: DataFrame.index are different
def test_path_path_lib(all_parsers):
    parser = all_parsers
    df = DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    result = tm.round_trip_pathlib(df.to_csv, lambda p: parser.read_csv(p, index_col=0))
    tm.assert_frame_equal(df, result)


@xfail_pyarrow  # AssertionError: DataFrame.index are different
def test_path_local_path(all_parsers):
    parser = all_parsers
    df = DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    result = tm.round_trip_localpath(
        df.to_csv, lambda p: parser.read_csv(p, index_col=0)
    )
    tm.assert_frame_equal(df, result)


def test_nonexistent_path(all_parsers):
    # gh-2428: pls no segfault
    # gh-14086: raise more helpful FileNotFoundError
    # GH#29233 "File foo" instead of "File b'foo'"
    parser = all_parsers
    path = f"{uuid.uuid4()}.csv"

    msg = r"\[Errno 2\]"
    with pytest.raises(FileNotFoundError, match=msg) as e:
        parser.read_csv(path)
    assert path == e.value.filename


@td.skip_if_windows  # os.chmod does not work in windows
def test_no_permission(all_parsers):
    # GH 23784
    parser = all_parsers

    msg = r"\[Errno 13\]"
    with tm.ensure_clean() as path:
        os.chmod(path, 0)  # make file unreadable

        # verify that this process cannot open the file (not running as sudo)
        try:
            with open(path, encoding="utf-8"):
                pass
            pytest.skip("Running as sudo.")
        except PermissionError:
            pass

        with pytest.raises(PermissionError, match=msg) as e:
            parser.read_csv(path)
        assert path == e.value.filename


@pytest.mark.parametrize(
    "data,kwargs,expected,msg",
    [
        # gh-10728: WHITESPACE_LINE
        (
            "a,b,c\n4,5,6\n ",
            {},
            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
            None,
        ),
        # gh-10548: EAT_LINE_COMMENT
        (
            "a,b,c\n4,5,6\n#comment",
            {"comment": "#"},
            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
            None,
        ),
        # EAT_CRNL_NOP
        (
            "a,b,c\n4,5,6\n\r",
            {},
            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
            None,
        ),
        # EAT_COMMENT
        (
            "a,b,c\n4,5,6#comment",
            {"comment": "#"},
            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
            None,
        ),
        # SKIP_LINE
        (
            "a,b,c\n4,5,6\nskipme",
            {"skiprows": [2]},
            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
            None,
        ),
        # EAT_LINE_COMMENT
        (
            "a,b,c\n4,5,6\n#comment",
            {"comment": "#", "skip_blank_lines": False},
            DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),
            None,
        ),
        # IN_FIELD
        (
            "a,b,c\n4,5,6\n ",
            {"skip_blank_lines": False},
            DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),
            None,
        ),
        # EAT_CRNL
        (
            "a,b,c\n4,5,6\n\r",
            {"skip_blank_lines": False},
            DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),
            None,
        ),
        # ESCAPED_CHAR
        (
            "a,b,c\n4,5,6\n\\",
            {"escapechar": "\\"},
            None,
            "(EOF following escape character)|(unexpected end of data)",
        ),
        # ESCAPE_IN_QUOTED_FIELD
        (
            'a,b,c\n4,5,6\n"\\',
            {"escapechar": "\\"},
            None,
            "(EOF inside string starting at row 2)|(unexpected end of data)",
        ),
        # IN_QUOTED_FIELD
        (
            'a,b,c\n4,5,6\n"',
            {"escapechar": "\\"},
            None,
            "(EOF inside string starting at row 2)|(unexpected end of data)",
        ),
    ],
    ids=[
        "whitespace-line",
        "eat-line-comment",
        "eat-crnl-nop",
        "eat-comment",
        "skip-line",
        "eat-line-comment",
        "in-field",
        "eat-crnl",
        "escaped-char",
        "escape-in-quoted-field",
        "in-quoted-field",
    ],
)
def test_eof_states(all_parsers, data, kwargs, expected, msg, request):
    # see gh-10728, gh-10548
    parser = all_parsers

    if parser.engine == "pyarrow" and "comment" in kwargs:
        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), **kwargs)
        return

    if parser.engine == "pyarrow" and "\r" not in data:
        # pandas.errors.ParserError: CSV parse error: Expected 3 columns, got 1:
        # ValueError: skiprows argument must be an integer when using engine='pyarrow'
        # AssertionError: Regex pattern did not match.
        pytest.skip(reason="https://github.com/apache/arrow/issues/38676")

    if expected is None:
        with pytest.raises(ParserError, match=msg):
            parser.read_csv(StringIO(data), **kwargs)
    else:
        result = parser.read_csv(StringIO(data), **kwargs)
        tm.assert_frame_equal(result, expected)


def test_temporary_file(all_parsers):
    # see gh-13398
    parser = all_parsers
    data = "0 0"

    with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:
        new_file.write(data)
        new_file.flush()
        new_file.seek(0)

        if parser.engine == "pyarrow":
            msg = "the 'pyarrow' engine does not support regex separators"
            with pytest.raises(ValueError, match=msg):
                parser.read_csv(new_file, sep=r"\s+", header=None)
            return

        result = parser.read_csv(new_file, sep=r"\s+", header=None)

        expected = DataFrame([[0, 0]])
        tm.assert_frame_equal(result, expected)


def test_internal_eof_byte(all_parsers):
    # see gh-5500
    parser = all_parsers
    data = "a,b\n1\x1a,2"

    expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])
    result = parser.read_csv(StringIO(data))
    tm.assert_frame_equal(result, expected)


def test_internal_eof_byte_to_file(all_parsers):
    # see gh-16559
    parser = all_parsers
    data = b'c1,c2\r\n"test \x1a test", test\r\n'
    expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])
    path = f"__{uuid.uuid4()}__.csv"

    with tm.ensure_clean(path) as path:
        with open(path, "wb") as f:
            f.write(data)

        result = parser.read_csv(path)
        tm.assert_frame_equal(result, expected)


def test_file_handle_string_io(all_parsers):
    # gh-14418
    #
    # Don't close user provided file handles.
    parser = all_parsers
    data = "a,b\n1,2"

    fh = StringIO(data)
    parser.read_csv(fh)
    assert not fh.closed


def test_file_handles_with_open(all_parsers, csv1):
    # gh-14418
    #
    # Don't close user provided file handles.
    parser = all_parsers

    for mode in ["r", "rb"]:
        with open(csv1, mode, encoding="utf-8" if mode == "r" else None) as f:
            parser.read_csv(f)
            assert not f.closed


def test_invalid_file_buffer_class(all_parsers):
    # see gh-15337
    class InvalidBuffer:
        pass

    parser = all_parsers
    msg = "Invalid file path or buffer object type"

    with pytest.raises(ValueError, match=msg):
        parser.read_csv(InvalidBuffer())


def test_invalid_file_buffer_mock(all_parsers):
    # see gh-15337
    parser = all_parsers
    msg = "Invalid file path or buffer object type"

    class Foo:
        pass

    with pytest.raises(ValueError, match=msg):
        parser.read_csv(Foo())


def test_valid_file_buffer_seems_invalid(all_parsers):
    # gh-16135: we want to ensure that "tell" and "seek"
    # aren't actually being used when we call `read_csv`
    #
    # Thus, while the object may look "invalid" (these
    # methods are attributes of the `StringIO` class),
    # it is still a valid file-object for our purposes.
    class NoSeekTellBuffer(StringIO):
        def tell(self):
            raise AttributeError("No tell method")

        def seek(self, pos, whence=0):
            raise AttributeError("No seek method")

    data = "a\n1"
    parser = all_parsers
    expected = DataFrame({"a": [1]})

    result = parser.read_csv(NoSeekTellBuffer(data))
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("io_class", [StringIO, BytesIO])
@pytest.mark.parametrize("encoding", [None, "utf-8"])
def test_read_csv_file_handle(all_parsers, io_class, encoding):
    """
    Test whether read_csv does not close user-provided file handles.

    GH 36980
    """
    parser = all_parsers
    expected = DataFrame({"a": [1], "b": [2]})

    content = "a,b\n1,2"
    handle = io_class(content.encode("utf-8") if io_class == BytesIO else content)

    tm.assert_frame_equal(parser.read_csv(handle, encoding=encoding), expected)
    assert not handle.closed


def test_memory_map_compression(all_parsers, compression):
    """
    Support memory map for compressed files.

    GH 37621
    """
    parser = all_parsers
    expected = DataFrame({"a": [1], "b": [2]})

    with tm.ensure_clean() as path:
        expected.to_csv(path, index=False, compression=compression)

        if parser.engine == "pyarrow":
            msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
            with pytest.raises(ValueError, match=msg):
                parser.read_csv(path, memory_map=True, compression=compression)
            return

        result = parser.read_csv(path, memory_map=True, compression=compression)

    tm.assert_frame_equal(
        result,
        expected,
    )


def test_context_manager(all_parsers, datapath):
    # make sure that opened files are closed
    parser = all_parsers

    path = datapath("io", "data", "csv", "iris.csv")

    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(path, chunksize=1)
        return

    reader = parser.read_csv(path, chunksize=1)
    assert not reader.handles.handle.closed
    try:
        with reader:
            next(reader)
            assert False
    except AssertionError:
        assert reader.handles.handle.closed


def test_context_manageri_user_provided(all_parsers, datapath):
    # make sure that user-provided handles are not closed
    parser = all_parsers

    with open(datapath("io", "data", "csv", "iris.csv"), encoding="utf-8") as path:
        if parser.engine == "pyarrow":
            msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
            with pytest.raises(ValueError, match=msg):
                parser.read_csv(path, chunksize=1)
            return

        reader = parser.read_csv(path, chunksize=1)
        assert not reader.handles.handle.closed
        try:
            with reader:
                next(reader)
                assert False
        except AssertionError:
            assert not reader.handles.handle.closed


@skip_pyarrow  # ParserError: Empty CSV file
def test_file_descriptor_leak(all_parsers, using_copy_on_write):
    # GH 31488
    parser = all_parsers
    with tm.ensure_clean() as path:
        with pytest.raises(EmptyDataError, match="No columns to parse from file"):
            parser.read_csv(path)


def test_memory_map(all_parsers, csv_dir_path):
    mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")
    parser = all_parsers

    expected = DataFrame(
        {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}
    )

    if parser.engine == "pyarrow":
        msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(mmap_file, memory_map=True)
        return

    result = parser.read_csv(mmap_file, memory_map=True)
    tm.assert_frame_equal(result, expected)
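For orientation, a minimal sketch (not part of the diff) of the handle-preservation contract the file above tests: read_csv must not close a file object supplied by the caller:

from io import StringIO

import pandas as pd

handle = StringIO("a,b\n1,2")
pd.read_csv(handle)
assert not handle.closed  # the caller still owns the handle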
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_float.py
ADDED
@@ -0,0 +1,79 @@
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import StringIO

import numpy as np
import pytest

from pandas.compat import is_platform_linux

from pandas import DataFrame
import pandas._testing as tm

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)
xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")


@skip_pyarrow  # ParserError: CSV parse error: Empty CSV file or block
def test_float_parser(all_parsers):
    # see gh-9565
    parser = all_parsers
    data = "45e-1,4.5,45.,inf,-inf"
    result = parser.read_csv(StringIO(data), header=None)

    expected = DataFrame([[float(s) for s in data.split(",")]])
    tm.assert_frame_equal(result, expected)


def test_scientific_no_exponent(all_parsers_all_precisions):
    # see gh-12215
    df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})
    data = df.to_csv(index=False)
    parser, precision = all_parsers_all_precisions

    df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)
    tm.assert_frame_equal(df_roundtrip, df)


@pytest.mark.parametrize(
    "neg_exp",
    [
        -617,
        -100000,
        pytest.param(-99999999999999999, marks=pytest.mark.skip_ubsan),
    ],
)
def test_very_negative_exponent(all_parsers_all_precisions, neg_exp):
    # GH#38753
    parser, precision = all_parsers_all_precisions

    data = f"data\n10E{neg_exp}"
    result = parser.read_csv(StringIO(data), float_precision=precision)
    expected = DataFrame({"data": [0.0]})
    tm.assert_frame_equal(result, expected)


@pytest.mark.skip_ubsan
@xfail_pyarrow  # AssertionError: Attributes of DataFrame.iloc[:, 0] are different
@pytest.mark.parametrize("exp", [999999999999999999, -999999999999999999])
def test_too_many_exponent_digits(all_parsers_all_precisions, exp, request):
    # GH#38753
    parser, precision = all_parsers_all_precisions
    data = f"data\n10E{exp}"
    result = parser.read_csv(StringIO(data), float_precision=precision)
    if precision == "round_trip":
        if exp == 999999999999999999 and is_platform_linux():
            mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result")
            request.applymarker(mark)

        value = np.inf if exp > 0 else 0.0
        expected = DataFrame({"data": [value]})
    else:
        expected = DataFrame({"data": [f"10E{exp}"]})

    tm.assert_frame_equal(result, expected)
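For orientation, a minimal sketch (not part of the diff) of the float_precision option these tests parametrize over; "round_trip" is generally understood to select the slower, round-trip-exact string-to-float converter:

from io import StringIO

import pandas as pd

data = "data\n0.1\n0.2"
df = pd.read_csv(StringIO(data), float_precision="round_trip")
assert df["data"].dtype == "float64"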
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_inf.py
ADDED
@@ -0,0 +1,78 @@
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import StringIO

import numpy as np
import pytest

from pandas import (
    DataFrame,
    option_context,
)
import pandas._testing as tm

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)

xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")


@xfail_pyarrow  # AssertionError: DataFrame.index are different
@pytest.mark.parametrize("na_filter", [True, False])
def test_inf_parsing(all_parsers, na_filter):
    parser = all_parsers
    data = """\
,A
a,inf
b,-inf
c,+Inf
d,-Inf
e,INF
f,-INF
g,+INf
h,-INf
i,inF
j,-inF"""
    expected = DataFrame(
        {"A": [float("inf"), float("-inf")] * 5},
        index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],
    )
    result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
    tm.assert_frame_equal(result, expected)


@xfail_pyarrow  # AssertionError: DataFrame.index are different
@pytest.mark.parametrize("na_filter", [True, False])
def test_infinity_parsing(all_parsers, na_filter):
    parser = all_parsers
    data = """\
,A
a,Infinity
b,-Infinity
c,+Infinity
"""
    expected = DataFrame(
        {"A": [float("infinity"), float("-infinity"), float("+infinity")]},
        index=["a", "b", "c"],
    )
    result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)
    tm.assert_frame_equal(result, expected)


def test_read_csv_with_use_inf_as_na(all_parsers):
    # https://github.com/pandas-dev/pandas/issues/35493
    parser = all_parsers
    data = "1.0\nNaN\n3.0"
    msg = "use_inf_as_na option is deprecated"
    warn = FutureWarning
    if parser.engine == "pyarrow":
        warn = (FutureWarning, DeprecationWarning)

    with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
        with option_context("use_inf_as_na", True):
            result = parser.read_csv(StringIO(data), header=None)
    expected = DataFrame([1.0, np.nan, 3.0])
    tm.assert_frame_equal(result, expected)
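For orientation, a minimal sketch (not part of the diff) of the infinity parsing covered above: "inf" and "Infinity" spellings, in any case and with an optional sign, parse to +/-inf floats:

from io import StringIO

import numpy as np
import pandas as pd

df = pd.read_csv(StringIO("A\ninf\n-Infinity"))
assert np.isinf(df["A"]).all()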
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_iterator.py
ADDED
@@ -0,0 +1,134 @@
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from io import StringIO

import pytest

from pandas import (
    DataFrame,
    concat,
)
import pandas._testing as tm

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)


def test_iterator(all_parsers):
    # see gh-6607
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
    parser = all_parsers
    kwargs = {"index_col": 0}

    expected = parser.read_csv(StringIO(data), **kwargs)

    if parser.engine == "pyarrow":
        msg = "The 'iterator' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), iterator=True, **kwargs)
        return

    with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:
        first_chunk = reader.read(3)
        tm.assert_frame_equal(first_chunk, expected[:3])

        last_chunk = reader.read(5)
        tm.assert_frame_equal(last_chunk, expected[3:])


def test_iterator2(all_parsers):
    parser = all_parsers
    data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""

    if parser.engine == "pyarrow":
        msg = "The 'iterator' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), iterator=True)
        return

    with parser.read_csv(StringIO(data), iterator=True) as reader:
        result = list(reader)

    expected = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        index=["foo", "bar", "baz"],
        columns=["A", "B", "C"],
    )
    tm.assert_frame_equal(result[0], expected)


def test_iterator_stop_on_chunksize(all_parsers):
    # gh-3967: stopping iteration when chunksize is specified
    parser = all_parsers
    data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
    if parser.engine == "pyarrow":
        msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(StringIO(data), chunksize=1)
        return

    with parser.read_csv(StringIO(data), chunksize=1) as reader:
        result = list(reader)

    assert len(result) == 3
    expected = DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        index=["foo", "bar", "baz"],
        columns=["A", "B", "C"],
    )
    tm.assert_frame_equal(concat(result), expected)


@pytest.mark.parametrize(
    "kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]
)
def test_iterator_skipfooter_errors(all_parsers, kwargs):
    msg = "'skipfooter' not supported for iteration"
    parser = all_parsers
    data = "a\n1\n2"

    if parser.engine == "pyarrow":
        msg = (
            "The '(chunksize|iterator)' option is not supported with the "
            "'pyarrow' engine"
        )

    with pytest.raises(ValueError, match=msg):
        with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _:
            pass


def test_iteration_open_handle(all_parsers):
    parser = all_parsers
    kwargs = {"header": None}

    with tm.ensure_clean() as path:
        with open(path, "w", encoding="utf-8") as f:
            f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")

        with open(path, encoding="utf-8") as f:
            for line in f:
                if "CCC" in line:
                    break

            result = parser.read_csv(f, **kwargs)
            expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]})
            tm.assert_frame_equal(result, expected)
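For orientation, a minimal sketch (not part of the diff) of the chunked-reading pattern tested above: chunksize returns a reader that works as a context manager, and concat reassembles the pieces:

from io import StringIO

import pandas as pd

data = "A,B\n1,2\n3,4\n5,6"
with pd.read_csv(StringIO(data), chunksize=1) as reader:
    parts = list(reader)  # three one-row DataFrames
whole = pd.concat(parts)
assert len(parts) == 3 and len(whole) == 3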
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_read_errors.py
ADDED
@@ -0,0 +1,320 @@
"""
Tests that work on the Python, C and PyArrow engines but do not have a
specific classification into the other test modules.
"""
import codecs
import csv
from io import StringIO
import os
from pathlib import Path

import numpy as np
import pytest

from pandas.compat import PY311
from pandas.errors import (
    EmptyDataError,
    ParserError,
    ParserWarning,
)

from pandas import DataFrame
import pandas._testing as tm

xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")


def test_empty_decimal_marker(all_parsers):
    data = """A|B|C
1|2,334|5
10|13|10.
"""
    # Parsers support only length-1 decimals
    msg = "Only length-1 decimal markers supported"
    parser = all_parsers

    if parser.engine == "pyarrow":
        msg = (
            "only single character unicode strings can be "
            "converted to Py_UCS4, got length 0"
        )

    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), decimal="")


def test_bad_stream_exception(all_parsers, csv_dir_path):
    # see gh-13652
    #
    # This test validates that both the Python engine and C engine will
    # raise UnicodeDecodeError instead of C engine raising ParserError
    # and swallowing the exception that caused read to fail.
    path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")
    codec = codecs.lookup("utf-8")
    utf8 = codecs.lookup("utf-8")
    parser = all_parsers
    msg = "'utf-8' codec can't decode byte"

    # Stream must be binary UTF8.
    with open(path, "rb") as handle, codecs.StreamRecoder(
        handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter
    ) as stream:
        with pytest.raises(UnicodeDecodeError, match=msg):
            parser.read_csv(stream)


def test_malformed(all_parsers):
    # see gh-6607
    parser = all_parsers
    data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
    msg = "Expected 3 fields in line 4, saw 5"
    err = ParserError
    if parser.engine == "pyarrow":
        msg = "The 'comment' option is not supported with the 'pyarrow' engine"
        err = ValueError
    with pytest.raises(err, match=msg):
        parser.read_csv(StringIO(data), header=1, comment="#")


@pytest.mark.parametrize("nrows", [5, 3, None])
def test_malformed_chunks(all_parsers, nrows):
    data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
    parser = all_parsers

    if parser.engine == "pyarrow":
        msg = "The 'iterator' option is not supported with the 'pyarrow' engine"
        with pytest.raises(ValueError, match=msg):
            parser.read_csv(
                StringIO(data),
                header=1,
                comment="#",
                iterator=True,
                chunksize=1,
                skiprows=[2],
            )
        return

    msg = "Expected 3 fields in line 6, saw 5"
    with parser.read_csv(
        StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]
    ) as reader:
        with pytest.raises(ParserError, match=msg):
            reader.read(nrows)


@xfail_pyarrow  # does not raise
def test_catch_too_many_names(all_parsers):
    # see gh-5156
    data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
    parser = all_parsers
    msg = (
        "Too many columns specified: expected 4 and found 3"
        if parser.engine == "c"
        else "Number of passed names did not match "
        "number of header fields in the file"
    )

    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])


@skip_pyarrow  # CSV parse error: Empty CSV file or block
@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])
def test_raise_on_no_columns(all_parsers, nrows):
    parser = all_parsers
    data = "\n" * nrows

    msg = "No columns to parse from file"
    with pytest.raises(EmptyDataError, match=msg):
        parser.read_csv(StringIO(data))


def test_unexpected_keyword_parameter_exception(all_parsers):
    # GH-34976
    parser = all_parsers

    msg = "{}\\(\\) got an unexpected keyword argument 'foo'"
    with pytest.raises(TypeError, match=msg.format("read_csv")):
        parser.read_csv("foo.csv", foo=1)
    with pytest.raises(TypeError, match=msg.format("read_table")):
        parser.read_table("foo.tsv", foo=1)


def test_suppress_error_output(all_parsers):
    # see gh-15925
    parser = all_parsers
    data = "a\n1\n1,2,3\n4\n5,6,7"
    expected = DataFrame({"a": [1, 4]})

    result = parser.read_csv(StringIO(data), on_bad_lines="skip")
    tm.assert_frame_equal(result, expected)


def test_error_bad_lines(all_parsers):
    # see gh-15925
    parser = all_parsers
    data = "a\n1\n1,2,3\n4\n5,6,7"

    msg = "Expected 1 fields in line 3, saw 3"

    if parser.engine == "pyarrow":
        # "CSV parse error: Expected 1 columns, got 3: 1,2,3"
        pytest.skip(reason="https://github.com/apache/arrow/issues/38676")

    with pytest.raises(ParserError, match=msg):
        parser.read_csv(StringIO(data), on_bad_lines="error")


def test_warn_bad_lines(all_parsers):
    # see gh-15925
    parser = all_parsers
    data = "a\n1\n1,2,3\n4\n5,6,7"
    expected = DataFrame({"a": [1, 4]})
    match_msg = "Skipping line"

    expected_warning = ParserWarning
    if parser.engine == "pyarrow":
        match_msg = "Expected 1 columns, but found 3: 1,2,3"
        expected_warning = (ParserWarning, DeprecationWarning)

    with tm.assert_produces_warning(
        expected_warning, match=match_msg, check_stacklevel=False
    ):
        result = parser.read_csv(StringIO(data), on_bad_lines="warn")
    tm.assert_frame_equal(result, expected)


def test_read_csv_wrong_num_columns(all_parsers):
    # Too few columns.
    data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
    parser = all_parsers
    msg = "Expected 6 fields in line 3, saw 7"

    if parser.engine == "pyarrow":
        # Expected 6 columns, got 7: 6,7,8,9,10,11,12
        pytest.skip(reason="https://github.com/apache/arrow/issues/38676")

    with pytest.raises(ParserError, match=msg):
        parser.read_csv(StringIO(data))


def test_null_byte_char(request, all_parsers):
    # see gh-2741
    data = "\x00,foo"
    names = ["a", "b"]
    parser = all_parsers

    if parser.engine == "c" or (parser.engine == "python" and PY311):
        if parser.engine == "python" and PY311:
            request.applymarker(
                pytest.mark.xfail(
                    reason="In Python 3.11, this is read as an empty character not null"
                )
            )
        expected = DataFrame([[np.nan, "foo"]], columns=names)
        out = parser.read_csv(StringIO(data), names=names)
        tm.assert_frame_equal(out, expected)
    else:
        if parser.engine == "pyarrow":
            # CSV parse error: Empty CSV file or block: "
            # cannot infer number of columns"
            pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
        else:
            msg = "NULL byte detected"
            with pytest.raises(ParserError, match=msg):
                parser.read_csv(StringIO(data), names=names)


@pytest.mark.filterwarnings("always::ResourceWarning")
def test_open_file(request, all_parsers):
    # GH 39024
    parser = all_parsers

    msg = "Could not determine delimiter"
    err = csv.Error
    if parser.engine == "c":
        msg = "the 'c' engine does not support sep=None with delim_whitespace=False"
        err = ValueError
    elif parser.engine == "pyarrow":
        msg = (
            "the 'pyarrow' engine does not support sep=None with delim_whitespace=False"
        )
        err = ValueError

    with tm.ensure_clean() as path:
        file = Path(path)
        file.write_bytes(b"\xe4\na\n1")

        with tm.assert_produces_warning(None):
            # should not trigger a ResourceWarning
            with pytest.raises(err, match=msg):
                parser.read_csv(file, sep=None, encoding_errors="replace")


def test_invalid_on_bad_line(all_parsers):
    parser = all_parsers
    data = "a\n1\n1,2,3\n4\n5,6,7"
    with pytest.raises(ValueError, match="Argument abc is invalid for on_bad_lines"):
        parser.read_csv(StringIO(data), on_bad_lines="abc")


def test_bad_header_uniform_error(all_parsers):
    parser = all_parsers
    data = "+++123456789...\ncol1,col2,col3,col4\n1,2,3,4\n"
    msg = "Expected 2 fields in line 2, saw 4"
    if parser.engine == "c":
        msg = (
            "Could not construct index. Requested to use 1 "
            "number of columns, but 3 left to parse."
        )
    elif parser.engine == "pyarrow":
        # "CSV parse error: Expected 1 columns, got 4: col1,col2,col3,col4"
        pytest.skip(reason="https://github.com/apache/arrow/issues/38676")

    with pytest.raises(ParserError, match=msg):
        parser.read_csv(StringIO(data), index_col=0, on_bad_lines="error")


def test_on_bad_lines_warn_correct_formatting(all_parsers):
    # see gh-15925
    parser = all_parsers
    data = """1,2
a,b
a,b,c
a,b,d
a,b
"""
    expected = DataFrame({"1": "a", "2": ["b"] * 2})
    match_msg = "Skipping line"

    expected_warning = ParserWarning
    if parser.engine == "pyarrow":
        match_msg = "Expected 2 columns, but found 3: a,b,c"
        expected_warning = (ParserWarning, DeprecationWarning)

    with tm.assert_produces_warning(
        expected_warning, match=match_msg, check_stacklevel=False
    ):
        result = parser.read_csv(StringIO(data), on_bad_lines="warn")
    tm.assert_frame_equal(result, expected)
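For orientation, a minimal sketch (not part of the diff) of the bad-line handling exercised above: on_bad_lines="skip" silently drops malformed rows, while "error" raises ParserError:

from io import StringIO

import pandas as pd

data = "a\n1\n1,2,3\n4\n5,6,7"
df = pd.read_csv(StringIO(data), on_bad_lines="skip")
assert df["a"].tolist() == [1, 4]  # the two malformed rows are dropped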
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (195 Bytes).
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_parse_dates.cpython-310.pyc
ADDED
Binary file (4.45 kB).
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_strings.cpython-310.pyc
ADDED
Binary file (2.28 kB).
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/usecols/__pycache__/test_usecols_basic.cpython-310.pyc
ADDED
Binary file (14.1 kB).