applied-ai-018 committed on
Commit 258907b (verified) · Parent(s): 411258b

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/conftest.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_api.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_query_eval.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/__init__.py +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/__pycache__/__init__.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/__pycache__/test_from_dict.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/__pycache__/test_from_records.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/test_from_dict.py +228 -0
  9. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/test_from_records.py +505 -0
  10. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/indexing/__pycache__/__init__.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/indexing/__pycache__/test_coercion.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/indexing/test_take.py +92 -0
  13. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__init__.py +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/__init__.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/conftest.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/generate_legacy_storage_files.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_clipboard.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_common.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_compression.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_feather.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_gbq.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_gcs.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_html.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_http_headers.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_orc.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_parquet.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_pickle.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_s3.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_spss.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_sql.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_stata.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py +342 -0
  34. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__init__.py +0 -0
  35. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_index.py +302 -0
  36. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_ints.py +231 -0
  37. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_verbose.py +81 -0
  38. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py +319 -0
  39. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py +643 -0
  40. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_comment.py +227 -0
  41. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_compression.py +211 -0
  42. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_concatenate_chunks.py +36 -0
  43. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_converters.py +263 -0
  44. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_dialect.py +195 -0
  45. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_encoding.py +337 -0
  46. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_header.py +733 -0
  47. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_index_col.py +376 -0
  48. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_mangle_dupes.py +179 -0
  49. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_multi_thread.py +150 -0
  50. env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_na_values.py +771 -0
env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (3.48 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_api.cpython-310.pyc ADDED
Binary file (12.3 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_query_eval.cpython-310.pyc ADDED
Binary file (48.7 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (196 Bytes).

env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/__pycache__/test_from_dict.cpython-310.pyc ADDED
Binary file (6.87 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/__pycache__/test_from_records.cpython-310.pyc ADDED
Binary file (17.8 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/test_from_dict.py ADDED
@@ -0,0 +1,228 @@
from collections import OrderedDict

import numpy as np
import pytest

from pandas._config import using_pyarrow_string_dtype

from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    RangeIndex,
    Series,
)
import pandas._testing as tm


class TestFromDict:
    # Note: these tests are specific to the from_dict method, not for
    # passing dictionaries to DataFrame.__init__

    def test_constructor_list_of_odicts(self):
        data = [
            OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
            OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
            OrderedDict([["a", 1.5], ["d", 6]]),
            OrderedDict(),
            OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
            OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
        ]

        result = DataFrame(data)
        expected = DataFrame.from_dict(
            dict(zip(range(len(data)), data)), orient="index"
        )
        tm.assert_frame_equal(result, expected.reindex(result.index))

    def test_constructor_single_row(self):
        data = [OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]])]

        result = DataFrame(data)
        expected = DataFrame.from_dict(dict(zip([0], data)), orient="index").reindex(
            result.index
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.skipif(
        using_pyarrow_string_dtype(), reason="columns inferring logic broken"
    )
    def test_constructor_list_of_series(self):
        data = [
            OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
            OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
        ]
        sdict = OrderedDict(zip(["x", "y"], data))
        idx = Index(["a", "b", "c"])

        # all named
        data2 = [
            Series([1.5, 3, 4], idx, dtype="O", name="x"),
            Series([1.5, 3, 6], idx, name="y"),
        ]
        result = DataFrame(data2)
        expected = DataFrame.from_dict(sdict, orient="index")
        tm.assert_frame_equal(result, expected)

        # some unnamed
        data2 = [
            Series([1.5, 3, 4], idx, dtype="O", name="x"),
            Series([1.5, 3, 6], idx),
        ]
        result = DataFrame(data2)

        sdict = OrderedDict(zip(["x", "Unnamed 0"], data))
        expected = DataFrame.from_dict(sdict, orient="index")
        tm.assert_frame_equal(result, expected)

        # none named
        data = [
            OrderedDict([["a", 1.5], ["b", 3], ["c", 4], ["d", 6]]),
            OrderedDict([["a", 1.5], ["b", 3], ["d", 6]]),
            OrderedDict([["a", 1.5], ["d", 6]]),
            OrderedDict(),
            OrderedDict([["a", 1.5], ["b", 3], ["c", 4]]),
            OrderedDict([["b", 3], ["c", 4], ["d", 6]]),
        ]
        data = [Series(d) for d in data]

        result = DataFrame(data)
        sdict = OrderedDict(zip(range(len(data)), data))
        expected = DataFrame.from_dict(sdict, orient="index")
        tm.assert_frame_equal(result, expected.reindex(result.index))

        result2 = DataFrame(data, index=np.arange(6, dtype=np.int64))
        tm.assert_frame_equal(result, result2)

        result = DataFrame([Series(dtype=object)])
        expected = DataFrame(index=[0])
        tm.assert_frame_equal(result, expected)

        data = [
            OrderedDict([["a", 1.5], ["b", 3.0], ["c", 4.0]]),
            OrderedDict([["a", 1.5], ["b", 3.0], ["c", 6.0]]),
        ]
        sdict = OrderedDict(zip(range(len(data)), data))

        idx = Index(["a", "b", "c"])
        data2 = [Series([1.5, 3, 4], idx, dtype="O"), Series([1.5, 3, 6], idx)]
        result = DataFrame(data2)
        expected = DataFrame.from_dict(sdict, orient="index")
        tm.assert_frame_equal(result, expected)

    def test_constructor_orient(self, float_string_frame):
        data_dict = float_string_frame.T._series
        recons = DataFrame.from_dict(data_dict, orient="index")
        expected = float_string_frame.reindex(index=recons.index)
        tm.assert_frame_equal(recons, expected)

        # dict of sequence
        a = {"hi": [32, 3, 3], "there": [3, 5, 3]}
        rs = DataFrame.from_dict(a, orient="index")
        xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
        tm.assert_frame_equal(rs, xp)

    def test_constructor_from_ordered_dict(self):
        # GH#8425
        a = OrderedDict(
            [
                ("one", OrderedDict([("col_a", "foo1"), ("col_b", "bar1")])),
                ("two", OrderedDict([("col_a", "foo2"), ("col_b", "bar2")])),
                ("three", OrderedDict([("col_a", "foo3"), ("col_b", "bar3")])),
            ]
        )
        expected = DataFrame.from_dict(a, orient="columns").T
        result = DataFrame.from_dict(a, orient="index")
        tm.assert_frame_equal(result, expected)

    def test_from_dict_columns_parameter(self):
        # GH#18529
        # Test new columns parameter for from_dict that was added to make
        # from_items(..., orient='index', columns=[...]) easier to replicate
        result = DataFrame.from_dict(
            OrderedDict([("A", [1, 2]), ("B", [4, 5])]),
            orient="index",
            columns=["one", "two"],
        )
        expected = DataFrame([[1, 2], [4, 5]], index=["A", "B"], columns=["one", "two"])
        tm.assert_frame_equal(result, expected)

        msg = "cannot use columns parameter with orient='columns'"
        with pytest.raises(ValueError, match=msg):
            DataFrame.from_dict(
                {"A": [1, 2], "B": [4, 5]},
                orient="columns",
                columns=["one", "two"],
            )
        with pytest.raises(ValueError, match=msg):
            DataFrame.from_dict({"A": [1, 2], "B": [4, 5]}, columns=["one", "two"])

    @pytest.mark.parametrize(
        "data_dict, orient, expected",
        [
            ({}, "index", RangeIndex(0)),
            (
                [{("a",): 1}, {("a",): 2}],
                "columns",
                Index([("a",)], tupleize_cols=False),
            ),
            (
                [OrderedDict([(("a",), 1), (("b",), 2)])],
                "columns",
                Index([("a",), ("b",)], tupleize_cols=False),
            ),
            ([{("a", "b"): 1}], "columns", Index([("a", "b")], tupleize_cols=False)),
        ],
    )
    def test_constructor_from_dict_tuples(self, data_dict, orient, expected):
        # GH#16769
        df = DataFrame.from_dict(data_dict, orient)
        result = df.columns
        tm.assert_index_equal(result, expected)

    def test_frame_dict_constructor_empty_series(self):
        s1 = Series(
            [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (2, 2), (2, 4)])
        )
        s2 = Series(
            [1, 2, 3, 4], index=MultiIndex.from_tuples([(1, 2), (1, 3), (3, 2), (3, 4)])
        )
        s3 = Series(dtype=object)

        # it works!
        DataFrame({"foo": s1, "bar": s2, "baz": s3})
        DataFrame.from_dict({"foo": s1, "baz": s3, "bar": s2})

    def test_from_dict_scalars_requires_index(self):
        msg = "If using all scalar values, you must pass an index"
        with pytest.raises(ValueError, match=msg):
            DataFrame.from_dict(OrderedDict([("b", 8), ("a", 5), ("a", 6)]))

    def test_from_dict_orient_invalid(self):
        msg = (
            "Expected 'index', 'columns' or 'tight' for orient parameter. "
            "Got 'abc' instead"
        )
        with pytest.raises(ValueError, match=msg):
            DataFrame.from_dict({"foo": 1, "baz": 3, "bar": 2}, orient="abc")

    def test_from_dict_order_with_single_column(self):
        data = {
            "alpha": {
                "value2": 123,
                "value1": 532,
                "animal": 222,
                "plant": False,
                "name": "test",
            }
        }
        result = DataFrame.from_dict(
            data,
            orient="columns",
        )
        expected = DataFrame(
            [[123], [532], [222], [False], ["test"]],
            index=["value2", "value1", "animal", "plant", "name"],
            columns=["alpha"],
        )
        tm.assert_frame_equal(result, expected)
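
For reference, a minimal sketch (not part of the commit) of the orient semantics these tests pin down; the toy data mapping below is illustrative:

    import pandas as pd

    data = {"A": [1, 2], "B": [4, 5]}

    # orient="columns" (the default): dict keys become column labels
    pd.DataFrame.from_dict(data)

    # orient="index": dict keys become row labels; the `columns` argument
    # is only accepted with this orient (ValueError otherwise, as tested above)
    pd.DataFrame.from_dict(data, orient="index", columns=["one", "two"])
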
env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/constructors/test_from_records.py ADDED
@@ -0,0 +1,505 @@
from collections.abc import Iterator
from datetime import datetime
from decimal import Decimal

import numpy as np
import pytest
import pytz

from pandas._config import using_pyarrow_string_dtype

from pandas.compat import is_platform_little_endian

from pandas import (
    CategoricalIndex,
    DataFrame,
    Index,
    Interval,
    RangeIndex,
    Series,
    date_range,
)
import pandas._testing as tm


class TestFromRecords:
    def test_from_records_dt64tz_frame(self):
        # GH#51162 don't lose tz when calling from_records with DataFrame input
        dti = date_range("2016-01-01", periods=10, tz="US/Pacific")
        df = DataFrame({i: dti for i in range(4)})
        with tm.assert_produces_warning(FutureWarning):
            res = DataFrame.from_records(df)
        tm.assert_frame_equal(res, df)

    def test_from_records_with_datetimes(self):
        # this may fail on certain platforms because of a numpy issue
        # related GH#6140
        if not is_platform_little_endian():
            pytest.skip("known failure of test on non-little endian")

        # construction with a null in a recarray
        # GH#6140
        expected = DataFrame({"EXPIRY": [datetime(2005, 3, 1, 0, 0), None]})

        arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
        dtypes = [("EXPIRY", "<M8[ns]")]

        recarray = np.rec.fromarrays(arrdata, dtype=dtypes)

        result = DataFrame.from_records(recarray)
        tm.assert_frame_equal(result, expected)

        # coercion should work too
        arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]
        dtypes = [("EXPIRY", "<M8[m]")]
        recarray = np.rec.fromarrays(arrdata, dtype=dtypes)
        result = DataFrame.from_records(recarray)
        # we get the closest supported unit, "s"
        expected["EXPIRY"] = expected["EXPIRY"].astype("M8[s]")
        tm.assert_frame_equal(result, expected)

    @pytest.mark.skipif(
        using_pyarrow_string_dtype(), reason="dtype checking logic doesn't work"
    )
    def test_from_records_sequencelike(self):
        df = DataFrame(
            {
                "A": np.array(
                    np.random.default_rng(2).standard_normal(6), dtype=np.float64
                ),
                "A1": np.array(
                    np.random.default_rng(2).standard_normal(6), dtype=np.float64
                ),
                "B": np.array(np.arange(6), dtype=np.int64),
                "C": ["foo"] * 6,
                "D": np.array([True, False] * 3, dtype=bool),
                "E": np.array(
                    np.random.default_rng(2).standard_normal(6), dtype=np.float32
                ),
                "E1": np.array(
                    np.random.default_rng(2).standard_normal(6), dtype=np.float32
                ),
                "F": np.array(np.arange(6), dtype=np.int32),
            }
        )

        # this is actually tricky to create the recordlike arrays and
        # have the dtypes be intact
        blocks = df._to_dict_of_blocks()
        tuples = []
        columns = []
        dtypes = []
        for dtype, b in blocks.items():
            columns.extend(b.columns)
            dtypes.extend([(c, np.dtype(dtype).descr[0][1]) for c in b.columns])
        for i in range(len(df.index)):
            tup = []
            for _, b in blocks.items():
                tup.extend(b.iloc[i].values)
            tuples.append(tuple(tup))

        recarray = np.array(tuples, dtype=dtypes).view(np.rec.recarray)
        recarray2 = df.to_records()
        lists = [list(x) for x in tuples]

        # tuples (lose the dtype info)
        result = DataFrame.from_records(tuples, columns=columns).reindex(
            columns=df.columns
        )

        # created recarray and with to_records recarray (have dtype info)
        result2 = DataFrame.from_records(recarray, columns=columns).reindex(
            columns=df.columns
        )
        result3 = DataFrame.from_records(recarray2, columns=columns).reindex(
            columns=df.columns
        )

        # list of tuples (no dtype info)
        result4 = DataFrame.from_records(lists, columns=columns).reindex(
            columns=df.columns
        )

        tm.assert_frame_equal(result, df, check_dtype=False)
        tm.assert_frame_equal(result2, df)
        tm.assert_frame_equal(result3, df)
        tm.assert_frame_equal(result4, df, check_dtype=False)

        # tuples is in the order of the columns
        result = DataFrame.from_records(tuples)
        tm.assert_index_equal(result.columns, RangeIndex(8))

        # test exclude parameter & we are casting the results here (as we don't
        # have dtype info to recover)
        columns_to_test = [columns.index("C"), columns.index("E1")]

        exclude = list(set(range(8)) - set(columns_to_test))
        result = DataFrame.from_records(tuples, exclude=exclude)
        result.columns = [columns[i] for i in sorted(columns_to_test)]
        tm.assert_series_equal(result["C"], df["C"])
        tm.assert_series_equal(result["E1"], df["E1"])

    def test_from_records_sequencelike_empty(self):
        # empty case
        result = DataFrame.from_records([], columns=["foo", "bar", "baz"])
        assert len(result) == 0
        tm.assert_index_equal(result.columns, Index(["foo", "bar", "baz"]))

        result = DataFrame.from_records([])
        assert len(result) == 0
        assert len(result.columns) == 0

    def test_from_records_dictlike(self):
        # test the dict methods
        df = DataFrame(
            {
                "A": np.array(
                    np.random.default_rng(2).standard_normal(6), dtype=np.float64
                ),
                "A1": np.array(
                    np.random.default_rng(2).standard_normal(6), dtype=np.float64
                ),
                "B": np.array(np.arange(6), dtype=np.int64),
                "C": ["foo"] * 6,
                "D": np.array([True, False] * 3, dtype=bool),
                "E": np.array(
                    np.random.default_rng(2).standard_normal(6), dtype=np.float32
                ),
                "E1": np.array(
                    np.random.default_rng(2).standard_normal(6), dtype=np.float32
                ),
                "F": np.array(np.arange(6), dtype=np.int32),
            }
        )

        # columns is in a different order here than the actual items iterated
        # from the dict
        blocks = df._to_dict_of_blocks()
        columns = []
        for b in blocks.values():
            columns.extend(b.columns)

        asdict = dict(df.items())
        asdict2 = {x: y.values for x, y in df.items()}

        # dict of series & dict of ndarrays (have dtype info)
        results = []
        results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))
        results.append(
            DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns)
        )
        results.append(
            DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns)
        )

        for r in results:
            tm.assert_frame_equal(r, df)

    def test_from_records_with_index_data(self):
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 3)), columns=["A", "B", "C"]
        )

        data = np.random.default_rng(2).standard_normal(10)
        with tm.assert_produces_warning(FutureWarning):
            df1 = DataFrame.from_records(df, index=data)
        tm.assert_index_equal(df1.index, Index(data))

    def test_from_records_bad_index_column(self):
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 3)), columns=["A", "B", "C"]
        )

        # should pass
        with tm.assert_produces_warning(FutureWarning):
            df1 = DataFrame.from_records(df, index=["C"])
        tm.assert_index_equal(df1.index, Index(df.C))

        with tm.assert_produces_warning(FutureWarning):
            df1 = DataFrame.from_records(df, index="C")
        tm.assert_index_equal(df1.index, Index(df.C))

        # should fail
        msg = "|".join(
            [
                r"'None of \[2\] are in the columns'",
            ]
        )
        with pytest.raises(KeyError, match=msg):
            with tm.assert_produces_warning(FutureWarning):
                DataFrame.from_records(df, index=[2])
        with pytest.raises(KeyError, match=msg):
            with tm.assert_produces_warning(FutureWarning):
                DataFrame.from_records(df, index=2)

    def test_from_records_non_tuple(self):
        class Record:
            def __init__(self, *args) -> None:
                self.args = args

            def __getitem__(self, i):
                return self.args[i]

            def __iter__(self) -> Iterator:
                return iter(self.args)

        recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]
        tups = [tuple(rec) for rec in recs]

        result = DataFrame.from_records(recs)
        expected = DataFrame.from_records(tups)
        tm.assert_frame_equal(result, expected)

    def test_from_records_len0_with_columns(self):
        # GH#2633
        result = DataFrame.from_records([], index="foo", columns=["foo", "bar"])
        expected = Index(["bar"])

        assert len(result) == 0
        assert result.index.name == "foo"
        tm.assert_index_equal(result.columns, expected)

    def test_from_records_series_list_dict(self):
        # GH#27358
        expected = DataFrame([[{"a": 1, "b": 2}, {"a": 3, "b": 4}]]).T
        data = Series([[{"a": 1, "b": 2}], [{"a": 3, "b": 4}]])
        result = DataFrame.from_records(data)
        tm.assert_frame_equal(result, expected)

    def test_from_records_series_categorical_index(self):
        # GH#32805
        index = CategoricalIndex(
            [Interval(-20, -10), Interval(-10, 0), Interval(0, 10)]
        )
        series_of_dicts = Series([{"a": 1}, {"a": 2}, {"b": 3}], index=index)
        frame = DataFrame.from_records(series_of_dicts, index=index)
        expected = DataFrame(
            {"a": [1, 2, np.nan], "b": [np.nan, np.nan, 3]}, index=index
        )
        tm.assert_frame_equal(frame, expected)

    def test_frame_from_records_utc(self):
        rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)}

        # it works
        DataFrame.from_records([rec], index="begin_time")

    def test_from_records_to_records(self):
        # from numpy documentation
        arr = np.zeros((2,), dtype=("i4,f4,S10"))
        arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]

        DataFrame.from_records(arr)

        index = Index(np.arange(len(arr))[::-1])
        indexed_frame = DataFrame.from_records(arr, index=index)
        tm.assert_index_equal(indexed_frame.index, index)

        # without names, it should go to last ditch
        arr2 = np.zeros((2, 3))
        tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))

        # wrong length
        msg = "|".join(
            [
                r"Length of values \(2\) does not match length of index \(1\)",
            ]
        )
        with pytest.raises(ValueError, match=msg):
            DataFrame.from_records(arr, index=index[:-1])

        indexed_frame = DataFrame.from_records(arr, index="f1")

        # what to do?
        records = indexed_frame.to_records()
        assert len(records.dtype.names) == 3

        records = indexed_frame.to_records(index=False)
        assert len(records.dtype.names) == 2
        assert "index" not in records.dtype.names

    def test_from_records_nones(self):
        tuples = [(1, 2, None, 3), (1, 2, None, 3), (None, 2, 5, 3)]

        df = DataFrame.from_records(tuples, columns=["a", "b", "c", "d"])
        assert np.isnan(df["c"][0])

    def test_from_records_iterator(self):
        arr = np.array(
            [(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5.0, 5.0, 6, 6), (7.0, 7.0, 8, 8)],
            dtype=[
                ("x", np.float64),
                ("u", np.float32),
                ("y", np.int64),
                ("z", np.int32),
            ],
        )
        df = DataFrame.from_records(iter(arr), nrows=2)
        xp = DataFrame(
            {
                "x": np.array([1.0, 3.0], dtype=np.float64),
                "u": np.array([1.0, 3.0], dtype=np.float32),
                "y": np.array([2, 4], dtype=np.int64),
                "z": np.array([2, 4], dtype=np.int32),
            }
        )
        tm.assert_frame_equal(df.reindex_like(xp), xp)

        # no dtypes specified here, so just compare with the default
        arr = [(1.0, 2), (3.0, 4), (5.0, 6), (7.0, 8)]
        df = DataFrame.from_records(iter(arr), columns=["x", "y"], nrows=2)
        tm.assert_frame_equal(df, xp.reindex(columns=["x", "y"]), check_dtype=False)

    def test_from_records_tuples_generator(self):
        def tuple_generator(length):
            for i in range(length):
                letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                yield (i, letters[i % len(letters)], i / length)

        columns_names = ["Integer", "String", "Float"]
        columns = [
            [i[j] for i in tuple_generator(10)] for j in range(len(columns_names))
        ]
        data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
        expected = DataFrame(data, columns=columns_names)

        generator = tuple_generator(10)
        result = DataFrame.from_records(generator, columns=columns_names)
        tm.assert_frame_equal(result, expected)

    def test_from_records_lists_generator(self):
        def list_generator(length):
            for i in range(length):
                letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                yield [i, letters[i % len(letters)], i / length]

        columns_names = ["Integer", "String", "Float"]
        columns = [
            [i[j] for i in list_generator(10)] for j in range(len(columns_names))
        ]
        data = {"Integer": columns[0], "String": columns[1], "Float": columns[2]}
        expected = DataFrame(data, columns=columns_names)

        generator = list_generator(10)
        result = DataFrame.from_records(generator, columns=columns_names)
        tm.assert_frame_equal(result, expected)

    def test_from_records_columns_not_modified(self):
        tuples = [(1, 2, 3), (1, 2, 3), (2, 5, 3)]

        columns = ["a", "b", "c"]
        original_columns = list(columns)

        DataFrame.from_records(tuples, columns=columns, index="a")

        assert columns == original_columns

    def test_from_records_decimal(self):
        tuples = [(Decimal("1.5"),), (Decimal("2.5"),), (None,)]

        df = DataFrame.from_records(tuples, columns=["a"])
        assert df["a"].dtype == object

        df = DataFrame.from_records(tuples, columns=["a"], coerce_float=True)
        assert df["a"].dtype == np.float64
        assert np.isnan(df["a"].values[-1])

    def test_from_records_duplicates(self):
        result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])

        expected = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "a"])

        tm.assert_frame_equal(result, expected)

    def test_from_records_set_index_name(self):
        def create_dict(order_id):
            return {
                "order_id": order_id,
                "quantity": np.random.default_rng(2).integers(1, 10),
                "price": np.random.default_rng(2).integers(1, 10),
            }

        documents = [create_dict(i) for i in range(10)]
        # demo missing data
        documents.append({"order_id": 10, "quantity": 5})

        result = DataFrame.from_records(documents, index="order_id")
        assert result.index.name == "order_id"

        # MultiIndex
        result = DataFrame.from_records(documents, index=["order_id", "quantity"])
        assert result.index.names == ("order_id", "quantity")

    def test_from_records_misc_brokenness(self):
        # GH#2179

        data = {1: ["foo"], 2: ["bar"]}

        result = DataFrame.from_records(data, columns=["a", "b"])
        exp = DataFrame(data, columns=["a", "b"])
        tm.assert_frame_equal(result, exp)

        # overlap in index/index_names

        data = {"a": [1, 2, 3], "b": [4, 5, 6]}

        result = DataFrame.from_records(data, index=["a", "b", "c"])
        exp = DataFrame(data, index=["a", "b", "c"])
        tm.assert_frame_equal(result, exp)

    def test_from_records_misc_brokenness2(self):
        # GH#2623
        rows = []
        rows.append([datetime(2010, 1, 1), 1])
        rows.append([datetime(2010, 1, 2), "hi"])  # test col upconverts to obj
        result = DataFrame.from_records(rows, columns=["date", "test"])
        expected = DataFrame(
            {"date": [row[0] for row in rows], "test": [row[1] for row in rows]}
        )
        tm.assert_frame_equal(result, expected)
        assert result.dtypes["test"] == np.dtype(object)

    def test_from_records_misc_brokenness3(self):
        rows = []
        rows.append([datetime(2010, 1, 1), 1])
        rows.append([datetime(2010, 1, 2), 1])
        result = DataFrame.from_records(rows, columns=["date", "test"])
        expected = DataFrame(
            {"date": [row[0] for row in rows], "test": [row[1] for row in rows]}
        )
        tm.assert_frame_equal(result, expected)

    def test_from_records_empty(self):
        # GH#3562
        result = DataFrame.from_records([], columns=["a", "b", "c"])
        expected = DataFrame(columns=["a", "b", "c"])
        tm.assert_frame_equal(result, expected)

        result = DataFrame.from_records([], columns=["a", "b", "b"])
        expected = DataFrame(columns=["a", "b", "b"])
        tm.assert_frame_equal(result, expected)

    def test_from_records_empty_with_nonempty_fields_gh3682(self):
        a = np.array([(1, 2)], dtype=[("id", np.int64), ("value", np.int64)])
        df = DataFrame.from_records(a, index="id")

        ex_index = Index([1], name="id")
        expected = DataFrame({"value": [2]}, index=ex_index, columns=["value"])
        tm.assert_frame_equal(df, expected)

        b = a[:0]
        df2 = DataFrame.from_records(b, index="id")
        tm.assert_frame_equal(df2, df.iloc[:0])

    def test_from_records_empty2(self):
        # GH#42456
        dtype = [("prop", int)]
        shape = (0, len(dtype))
        arr = np.empty(shape, dtype=dtype)

        result = DataFrame.from_records(arr)
        expected = DataFrame({"prop": np.array([], dtype=int)})
        tm.assert_frame_equal(result, expected)

        alt = DataFrame(arr)
        tm.assert_frame_equal(alt, expected)
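
For context, a minimal sketch (not part of the commit) of the structured-array round trip that test_from_records_to_records above exercises:

    import numpy as np
    import pandas as pd

    # structured array: field names become columns; "f1" is used as the index
    arr = np.zeros((2,), dtype="i4,f4,S10")
    arr[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]

    df = pd.DataFrame.from_records(arr, index="f1")
    records = df.to_records()  # round-trip; the index is re-attached as a field
    assert len(records.dtype.names) == 3
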
env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/indexing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/indexing/__pycache__/test_coercion.cpython-310.pyc ADDED
Binary file (5.22 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/frame/indexing/test_take.py ADDED
@@ -0,0 +1,92 @@
import pytest

import pandas._testing as tm


class TestDataFrameTake:
    def test_take_slices_deprecated(self, float_frame):
        # GH#51539
        df = float_frame

        slc = slice(0, 4, 1)
        with tm.assert_produces_warning(FutureWarning):
            df.take(slc, axis=0)
        with tm.assert_produces_warning(FutureWarning):
            df.take(slc, axis=1)

    def test_take(self, float_frame):
        # homogeneous
        order = [3, 1, 2, 0]
        for df in [float_frame]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            tm.assert_frame_equal(result, expected)

            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ["D", "B", "C", "A"]]
            tm.assert_frame_equal(result, expected, check_names=False)

        # negative indices
        order = [2, 1, -1]
        for df in [float_frame]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            tm.assert_frame_equal(result, expected)

            result = df.take(order, axis=0)
            tm.assert_frame_equal(result, expected)

            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ["C", "B", "D"]]
            tm.assert_frame_equal(result, expected, check_names=False)

        # illegal indices
        msg = "indices are out-of-bounds"
        with pytest.raises(IndexError, match=msg):
            df.take([3, 1, 2, 30], axis=0)
        with pytest.raises(IndexError, match=msg):
            df.take([3, 1, 2, -31], axis=0)
        with pytest.raises(IndexError, match=msg):
            df.take([3, 1, 2, 5], axis=1)
        with pytest.raises(IndexError, match=msg):
            df.take([3, 1, 2, -5], axis=1)

    def test_take_mixed_type(self, float_string_frame):
        # mixed-dtype
        order = [4, 1, 2, 0, 3]
        for df in [float_string_frame]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            tm.assert_frame_equal(result, expected)

            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ["foo", "B", "C", "A", "D"]]
            tm.assert_frame_equal(result, expected)

        # negative indices
        order = [4, 1, -2]
        for df in [float_string_frame]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            tm.assert_frame_equal(result, expected)

            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ["foo", "B", "D"]]
            tm.assert_frame_equal(result, expected)

    def test_take_mixed_numeric(self, mixed_float_frame, mixed_int_frame):
        # by dtype
        order = [1, 2, 0, 3]
        for df in [mixed_float_frame, mixed_int_frame]:
            result = df.take(order, axis=0)
            expected = df.reindex(df.index.take(order))
            tm.assert_frame_equal(result, expected)

            # axis = 1
            result = df.take(order, axis=1)
            expected = df.loc[:, ["B", "C", "A", "D"]]
            tm.assert_frame_equal(result, expected)
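
A minimal sketch (not part of the commit) of the take semantics these tests cover, on a toy frame:

    import pandas as pd

    df = pd.DataFrame({"A": [10, 20, 30], "B": [1, 2, 3]})

    df.take([2, 0], axis=0)   # rows by position: third row, then first
    df.take([0], axis=1)      # columns by position: "A"
    df.take([-1], axis=0)     # negative positions count from the end
    # out-of-bounds positions raise IndexError, as the tests above assert
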
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (180 Bytes).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (5.36 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/generate_legacy_storage_files.cpython-310.pyc ADDED
Binary file (8.28 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_clipboard.cpython-310.pyc ADDED
Binary file (12.1 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (22.3 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_compression.cpython-310.pyc ADDED
Binary file (11 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_feather.cpython-310.pyc ADDED
Binary file (9.17 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_fsspec.cpython-310.pyc ADDED
Binary file (9.34 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_gbq.cpython-310.pyc ADDED
Binary file (901 Bytes).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_gcs.cpython-310.pyc ADDED
Binary file (7 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_html.cpython-310.pyc ADDED
Binary file (52 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_http_headers.cpython-310.pyc ADDED
Binary file (4.44 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_orc.cpython-310.pyc ADDED
Binary file (9.97 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_parquet.cpython-310.pyc ADDED
Binary file (40.7 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_pickle.cpython-310.pyc ADDED
Binary file (20.6 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_s3.cpython-310.pyc ADDED
Binary file (1.27 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_spss.cpython-310.pyc ADDED
Binary file (4.76 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_sql.cpython-310.pyc ADDED
Binary file (108 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/__pycache__/test_stata.cpython-310.pyc ADDED
Binary file (81.7 kB).

env-llmeval/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py ADDED
@@ -0,0 +1,342 @@
"""
Self-contained script to write legacy storage (pickle) files.

To use this script, create an environment where you want to
generate pickles, say it's for 0.20.3, with your pandas clone
in ~/pandas

. activate pandas_0.20.3
cd ~/pandas/pandas

$ python -m tests.io.generate_legacy_storage_files \
    tests/io/data/legacy_pickle/0.20.3/ pickle

This script generates a storage file for the current arch, system,
and python version:
  pandas version: 0.20.3
  output dir    : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/
  storage format: pickle
created pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle

The idea here is that you are using the *current* version of
generate_legacy_storage_files with an *older* version of pandas to
generate a pickle file. We will then check this file into a current
branch, and test using test_pickle.py. This will load the *older*
pickles and test versus the current data that is generated
(with main). These are then compared.

If we have cases where we changed the signature (e.g. we renamed
offset -> freq in Timestamp), then we have to conditionally execute
in generate_legacy_storage_files.py to make it run under the older
AND the newer version.
"""

from datetime import timedelta
import os
import pickle
import platform as pl
import sys

# Remove script directory from path, otherwise Python will try to
# import the JSON test directory as the json module
sys.path.pop(0)

import numpy as np

import pandas
from pandas import (
    Categorical,
    DataFrame,
    Index,
    MultiIndex,
    NaT,
    Period,
    RangeIndex,
    Series,
    Timestamp,
    bdate_range,
    date_range,
    interval_range,
    period_range,
    timedelta_range,
)
from pandas.arrays import SparseArray

from pandas.tseries.offsets import (
    FY5253,
    BusinessDay,
    BusinessHour,
    CustomBusinessDay,
    DateOffset,
    Day,
    Easter,
    Hour,
    LastWeekOfMonth,
    Minute,
    MonthBegin,
    MonthEnd,
    QuarterBegin,
    QuarterEnd,
    SemiMonthBegin,
    SemiMonthEnd,
    Week,
    WeekOfMonth,
    YearBegin,
    YearEnd,
)


def _create_sp_series():
    nan = np.nan

    # nan-based
    arr = np.arange(15, dtype=np.float64)
    arr[7:12] = nan
    arr[-1:] = nan

    bseries = Series(SparseArray(arr, kind="block"))
    bseries.name = "bseries"
    return bseries


def _create_sp_tsseries():
    nan = np.nan

    # nan-based
    arr = np.arange(15, dtype=np.float64)
    arr[7:12] = nan
    arr[-1:] = nan

    date_index = bdate_range("1/1/2011", periods=len(arr))
    bseries = Series(SparseArray(arr, kind="block"), index=date_index)
    bseries.name = "btsseries"
    return bseries


def _create_sp_frame():
    nan = np.nan

    data = {
        "A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
        "B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
        "C": np.arange(10).astype(np.int64),
        "D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
    }

    dates = bdate_range("1/1/2011", periods=10)
    return DataFrame(data, index=dates).apply(SparseArray)


def create_pickle_data():
    """create the pickle data"""
    data = {
        "A": [0.0, 1.0, 2.0, 3.0, np.nan],
        "B": [0, 1, 0, 1, 0],
        "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
        "D": date_range("1/1/2009", periods=5),
        "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
    }

    scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")}

    index = {
        "int": Index(np.arange(10)),
        "date": date_range("20130101", periods=10),
        "period": period_range("2013-01-01", freq="M", periods=10),
        "float": Index(np.arange(10, dtype=np.float64)),
        "uint": Index(np.arange(10, dtype=np.uint64)),
        "timedelta": timedelta_range("00:00:00", freq="30min", periods=10),
    }

    index["range"] = RangeIndex(10)

    index["interval"] = interval_range(0, periods=10)

    mi = {
        "reg2": MultiIndex.from_tuples(
            tuple(
                zip(
                    *[
                        ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
                        ["one", "two", "one", "two", "one", "two", "one", "two"],
                    ]
                )
            ),
            names=["first", "second"],
        )
    }

    series = {
        "float": Series(data["A"]),
        "int": Series(data["B"]),
        "mixed": Series(data["E"]),
        "ts": Series(
            np.arange(10).astype(np.int64), index=date_range("20130101", periods=10)
        ),
        "mi": Series(
            np.arange(5).astype(np.float64),
            index=MultiIndex.from_tuples(
                tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"]
            ),
        ),
        "dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
        "cat": Series(Categorical(["foo", "bar", "baz"])),
        "dt": Series(date_range("20130101", periods=5)),
        "dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")),
        "period": Series([Period("2000Q1")] * 5),
    }

    mixed_dup_df = DataFrame(data)
    mixed_dup_df.columns = list("ABCDA")
    frame = {
        "float": DataFrame({"A": series["float"], "B": series["float"] + 1}),
        "int": DataFrame({"A": series["int"], "B": series["int"] + 1}),
        "mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
        "mi": DataFrame(
            {"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)},
            index=MultiIndex.from_tuples(
                tuple(
                    zip(
                        *[
                            ["bar", "bar", "baz", "baz", "baz"],
                            ["one", "two", "one", "two", "three"],
                        ]
                    )
                ),
                names=["first", "second"],
            ),
        ),
        "dup": DataFrame(
            np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"]
        ),
        "cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}),
        "cat_and_float": DataFrame(
            {
                "A": Categorical(["foo", "bar", "baz"]),
                "B": np.arange(3).astype(np.int64),
            }
        ),
        "mixed_dup": mixed_dup_df,
        "dt_mixed_tzs": DataFrame(
            {
                "A": Timestamp("20130102", tz="US/Eastern"),
                "B": Timestamp("20130603", tz="CET"),
            },
            index=range(5),
        ),
        "dt_mixed2_tzs": DataFrame(
            {
                "A": Timestamp("20130102", tz="US/Eastern"),
                "B": Timestamp("20130603", tz="CET"),
                "C": Timestamp("20130603", tz="UTC"),
            },
            index=range(5),
        ),
    }

    cat = {
        "int8": Categorical(list("abcdefg")),
        "int16": Categorical(np.arange(1000)),
        "int32": Categorical(np.arange(10000)),
    }

    timestamp = {
        "normal": Timestamp("2011-01-01"),
        "nat": NaT,
        "tz": Timestamp("2011-01-01", tz="US/Eastern"),
    }

    off = {
        "DateOffset": DateOffset(years=1),
        "DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824),
        "BusinessDay": BusinessDay(offset=timedelta(seconds=9)),
        "BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"),
        "CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"),
        "SemiMonthBegin": SemiMonthBegin(day_of_month=9),
        "SemiMonthEnd": SemiMonthEnd(day_of_month=24),
        "MonthBegin": MonthBegin(1),
        "MonthEnd": MonthEnd(1),
        "QuarterBegin": QuarterBegin(1),
        "QuarterEnd": QuarterEnd(1),
        "Day": Day(1),
        "YearBegin": YearBegin(1),
        "YearEnd": YearEnd(1),
        "Week": Week(1),
        "Week_Tues": Week(2, normalize=False, weekday=1),
        "WeekOfMonth": WeekOfMonth(week=3, weekday=4),
        "LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3),
        "FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
        "Easter": Easter(),
        "Hour": Hour(1),
        "Minute": Minute(1),
    }

    return {
        "series": series,
        "frame": frame,
        "index": index,
        "scalars": scalars,
        "mi": mi,
        "sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()},
        "sp_frame": {"float": _create_sp_frame()},
        "cat": cat,
        "timestamp": timestamp,
        "offsets": off,
    }


def platform_name():
    return "_".join(
        [
            str(pandas.__version__),
            str(pl.machine()),
            str(pl.system().lower()),
            str(pl.python_version()),
        ]
    )


def write_legacy_pickles(output_dir):
    version = pandas.__version__

    print(
        "This script generates a storage file for the current arch, system, "
        "and python version"
    )
    print(f"  pandas version: {version}")
    print(f"  output dir    : {output_dir}")
    print("  storage format: pickle")

    pth = f"{platform_name()}.pickle"

    with open(os.path.join(output_dir, pth), "wb") as fh:
        pickle.dump(create_pickle_data(), fh, pickle.DEFAULT_PROTOCOL)

    print(f"created pickle file: {pth}")


def write_legacy_file():
    # force our cwd to be the first searched
    sys.path.insert(0, "")

    if not 3 <= len(sys.argv) <= 4:
        sys.exit(
            "Specify output directory and storage type: generate_legacy_"
            "storage_files.py <output_dir> <storage_type> "
        )

    output_dir = str(sys.argv[1])
    storage_type = str(sys.argv[2])

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    if storage_type == "pickle":
        write_legacy_pickles(output_dir=output_dir)
    else:
        sys.exit("storage_type must be one of {'pickle'}")


if __name__ == "__main__":
    write_legacy_file()
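
A minimal sketch (not part of the commit) of consuming the script's output; the filename here is the docstring's example, following the platform_name() naming scheme, not a real artifact in this commit:

    import pickle

    # hypothetical filename: <pandas version>_<machine>_<system>_<python version>.pickle
    with open("0.20.3_x86_64_darwin_3.5.2.pickle", "rb") as fh:
        data = pickle.load(fh)

    # top-level keys mirror what create_pickle_data() returns
    sorted(data)  # ['cat', 'frame', 'index', 'mi', 'offsets', ...]
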
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_index.py ADDED
@@ -0,0 +1,302 @@
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
from datetime import datetime
from io import StringIO
import os

import pytest

from pandas import (
    DataFrame,
    Index,
    MultiIndex,
)
import pandas._testing as tm

pytestmark = pytest.mark.filterwarnings(
    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)

xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")


@pytest.mark.parametrize(
    "data,kwargs,expected",
    [
        (
            """foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
""",
            {"index_col": 0, "names": ["index", "A", "B", "C", "D"]},
            DataFrame(
                [
                    [2, 3, 4, 5],
                    [7, 8, 9, 10],
                    [12, 13, 14, 15],
                    [12, 13, 14, 15],
                    [12, 13, 14, 15],
                    [12, 13, 14, 15],
                ],
                index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),
                columns=["A", "B", "C", "D"],
            ),
        ),
        (
            """foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
""",
            {"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},
            DataFrame(
                [
                    [2, 3, 4, 5],
                    [7, 8, 9, 10],
                    [12, 13, 14, 15],
                    [12, 13, 14, 15],
                    [12, 13, 14, 15],
                ],
                index=MultiIndex.from_tuples(
                    [
                        ("foo", "one"),
                        ("foo", "two"),
                        ("foo", "three"),
                        ("bar", "one"),
                        ("bar", "two"),
                    ],
                    names=["index1", "index2"],
                ),
                columns=["A", "B", "C", "D"],
            ),
        ),
    ],
)
def test_pass_names_with_index(all_parsers, data, kwargs, expected):
    parser = all_parsers
    result = parser.read_csv(StringIO(data), **kwargs)
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])
def test_multi_index_no_level_names(all_parsers, index_col):
    data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
    headless_data = "\n".join(data.split("\n")[1:])

    names = ["A", "B", "C", "D"]
    parser = all_parsers

    result = parser.read_csv(
        StringIO(headless_data), index_col=index_col, header=None, names=names
    )
    expected = parser.read_csv(StringIO(data), index_col=index_col)

    # No index names in headless data.
    expected.index.names = [None] * 2
    tm.assert_frame_equal(result, expected)


@skip_pyarrow
def test_multi_index_no_level_names_implicit(all_parsers):
    parser = all_parsers
    data = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""

    result = parser.read_csv(StringIO(data))
    expected = DataFrame(
        [
            [2, 3, 4, 5],
            [7, 8, 9, 10],
            [12, 13, 14, 15],
            [12, 13, 14, 15],
            [12, 13, 14, 15],
        ],
        columns=["A", "B", "C", "D"],
        index=MultiIndex.from_tuples(
            [
                ("foo", "one"),
                ("foo", "two"),
                ("foo", "three"),
                ("bar", "one"),
                ("bar", "two"),
            ]
        ),
    )
    tm.assert_frame_equal(result, expected)


@xfail_pyarrow  # TypeError: an integer is required
@pytest.mark.parametrize(
    "data,expected,header",
    [
        ("a,b", DataFrame(columns=["a", "b"]), [0]),
        (
            "a,b\nc,d",
            DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),
            [0, 1],
        ),
    ],
)
@pytest.mark.parametrize("round_trip", [True, False])
def test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):
    # see gh-14545
    parser = all_parsers
    data = expected.to_csv(index=False) if round_trip else data

    result = parser.read_csv(StringIO(data), header=header)
    tm.assert_frame_equal(result, expected)


@xfail_pyarrow  # AssertionError: DataFrame.columns are different
def test_no_unnamed_index(all_parsers):
    parser = all_parsers
    data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
    result = parser.read_csv(StringIO(data), sep=" ")
    expected = DataFrame(
        [[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],
        columns=["Unnamed: 0", "id", "c0", "c1", "c2"],
    )
    tm.assert_frame_equal(result, expected)


def test_read_duplicate_index_explicit(all_parsers):
    data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=0)

    expected = DataFrame(
        [
            [2, 3, 4, 5],
            [7, 8, 9, 10],
            [12, 13, 14, 15],
            [12, 13, 14, 15],
            [12, 13, 14, 15],
            [12, 13, 14, 15],
        ],
        columns=["A", "B", "C", "D"],
        index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),
    )
    tm.assert_frame_equal(result, expected)


@skip_pyarrow
def test_read_duplicate_index_implicit(all_parsers):
    data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
    parser = all_parsers
    result = parser.read_csv(StringIO(data))

    expected = DataFrame(
        [
            [2, 3, 4, 5],
            [7, 8, 9, 10],
            [12, 13, 14, 15],
            [12, 13, 14, 15],
            [12, 13, 14, 15],
            [12, 13, 14, 15],
        ],
        columns=["A", "B", "C", "D"],
        index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),
    )
    tm.assert_frame_equal(result, expected)


@skip_pyarrow
def test_read_csv_no_index_name(all_parsers, csv_dir_path):
    parser = all_parsers
    csv2 = os.path.join(csv_dir_path, "test2.csv")
    result = parser.read_csv(csv2, index_col=0, parse_dates=True)

    expected = DataFrame(
        [
            [0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],
            [1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],
            [0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],
            [1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],
            [-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],
        ],
        columns=["A", "B", "C", "D", "E"],
        index=Index(
            [
                datetime(2000, 1, 3),
                datetime(2000, 1, 4),
                datetime(2000, 1, 5),
                datetime(2000, 1, 6),
                datetime(2000, 1, 7),
            ]
        ),
    )
    tm.assert_frame_equal(result, expected)


@skip_pyarrow
def test_empty_with_index(all_parsers):
    # see gh-10184
    data = "x,y"
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=0)

    expected = DataFrame(columns=["y"], index=Index([], name="x"))
    tm.assert_frame_equal(result, expected)


# CSV parse error: Empty CSV file or block: cannot infer number of columns
@skip_pyarrow
def test_empty_with_multi_index(all_parsers):
    # see gh-10467
    data = "x,y,z"
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=["x", "y"])

    expected = DataFrame(
        columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])
    )
    tm.assert_frame_equal(result, expected)


# CSV parse error: Empty CSV file or block: cannot infer number of columns
@skip_pyarrow
def test_empty_with_reversed_multi_index(all_parsers):
    data = "x,y,z"
    parser = all_parsers
    result = parser.read_csv(StringIO(data), index_col=[1, 0])

    expected = DataFrame(
        columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])
    )
    tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_ints.py ADDED
@@ -0,0 +1,231 @@
+ """
+ Tests that work on both the Python and C engines but do not have a
+ specific classification into the other test modules.
+ """
+ from io import StringIO
+
+ import numpy as np
+ import pytest
+
+ from pandas import (
+     DataFrame,
+     Series,
+ )
+ import pandas._testing as tm
+
+ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+
+ xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+ skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+ def test_int_conversion(all_parsers):
+     data = """A,B
+ 1.0,1
+ 2.0,2
+ 3.0,3
+ """
+     parser = all_parsers
+     result = parser.read_csv(StringIO(data))
+
+     expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "data,kwargs,expected",
+     [
+         (
+             "A,B\nTrue,1\nFalse,2\nTrue,3",
+             {},
+             DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
+         ),
+         (
+             "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",
+             {"true_values": ["yes", "Yes", "YES"], "false_values": ["no", "NO", "No"]},
+             DataFrame(
+                 [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],
+                 columns=["A", "B"],
+             ),
+         ),
+         (
+             "A,B\nTRUE,1\nFALSE,2\nTRUE,3",
+             {},
+             DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),
+         ),
+         (
+             "A,B\nfoo,bar\nbar,foo",
+             {"true_values": ["foo"], "false_values": ["bar"]},
+             DataFrame([[True, False], [False, True]], columns=["A", "B"]),
+         ),
+     ],
+ )
+ def test_parse_bool(all_parsers, data, kwargs, expected):
+     parser = all_parsers
+     result = parser.read_csv(StringIO(data), **kwargs)
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_parse_integers_above_fp_precision(all_parsers):
+     data = """Numbers
+ 17007000002000191
+ 17007000002000191
+ 17007000002000191
+ 17007000002000191
+ 17007000002000192
+ 17007000002000192
+ 17007000002000192
+ 17007000002000192
+ 17007000002000192
+ 17007000002000194"""
+     parser = all_parsers
+     result = parser.read_csv(StringIO(data))
+     expected = DataFrame(
+         {
+             "Numbers": [
+                 17007000002000191,
+                 17007000002000191,
+                 17007000002000191,
+                 17007000002000191,
+                 17007000002000192,
+                 17007000002000192,
+                 17007000002000192,
+                 17007000002000192,
+                 17007000002000192,
+                 17007000002000194,
+             ]
+         }
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("sep", [" ", r"\s+"])
+ def test_integer_overflow_bug(all_parsers, sep):
+     # see gh-2601
+     data = "65248E10 11\n55555E55 22\n"
+     parser = all_parsers
+     if parser.engine == "pyarrow" and sep != " ":
+         msg = "the 'pyarrow' engine does not support regex separators"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), header=None, sep=sep)
+         return
+
+     result = parser.read_csv(StringIO(data), header=None, sep=sep)
+     expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_int64_min_issues(all_parsers):
+     # see gh-2599
+     parser = all_parsers
+     data = "A,B\n0,0\n0,"
+     result = parser.read_csv(StringIO(data))
+
+     expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("conv", [None, np.int64, np.uint64])
+ def test_int64_overflow(all_parsers, conv, request):
+     data = """ID
+ 00013007854817840016671868
+ 00013007854817840016749251
+ 00013007854817840016754630
+ 00013007854817840016781876
+ 00013007854817840017028824
+ 00013007854817840017963235
+ 00013007854817840018860166"""
+     parser = all_parsers
+
+     if conv is None:
+         # 13007854817840016671868 > UINT64_MAX, so this
+         # will overflow and return object as the dtype.
+         if parser.engine == "pyarrow":
+             mark = pytest.mark.xfail(reason="parses to float64")
+             request.applymarker(mark)
+
+         result = parser.read_csv(StringIO(data))
+         expected = DataFrame(
+             [
+                 "00013007854817840016671868",
+                 "00013007854817840016749251",
+                 "00013007854817840016754630",
+                 "00013007854817840016781876",
+                 "00013007854817840017028824",
+                 "00013007854817840017963235",
+                 "00013007854817840018860166",
+             ],
+             columns=["ID"],
+         )
+         tm.assert_frame_equal(result, expected)
+     else:
+         # 13007854817840016671868 > UINT64_MAX, so attempts
+         # to cast to either int64 or uint64 will result in
+         # an OverflowError being raised.
+         msg = "|".join(
+             [
+                 "Python int too large to convert to C long",
+                 "long too big to convert",
+                 "int too big to convert",
+             ]
+         )
+         err = OverflowError
+         if parser.engine == "pyarrow":
+             err = ValueError
+             msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+
+         with pytest.raises(err, match=msg):
+             parser.read_csv(StringIO(data), converters={"ID": conv})
+
+
+ @skip_pyarrow  # CSV parse error: Empty CSV file or block
+ @pytest.mark.parametrize(
+     "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]
+ )
+ def test_int64_uint64_range(all_parsers, val):
+     # These numbers fall right inside the int64-uint64
+     # range, so they should be parsed exactly as integers.
+     parser = all_parsers
+     result = parser.read_csv(StringIO(str(val)), header=None)
+
+     expected = DataFrame([val])
+     tm.assert_frame_equal(result, expected)
+
+
+ @skip_pyarrow  # CSV parse error: Empty CSV file or block
+ @pytest.mark.parametrize(
+     "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]
+ )
+ def test_outside_int64_uint64_range(all_parsers, val):
+     # These numbers fall just outside the int64-uint64
+     # range, so they should be parsed as string.
+     parser = all_parsers
+     result = parser.read_csv(StringIO(str(val)), header=None)
+
+     expected = DataFrame([str(val)])
+     tm.assert_frame_equal(result, expected)
+
+
+ @xfail_pyarrow  # gets float64 dtype instead of object
+ @pytest.mark.parametrize("exp_data", [[str(-1), str(2**63)], [str(2**63), str(-1)]])
+ def test_numeric_range_too_wide(all_parsers, exp_data):
+     # No numerical dtype can hold both negative and uint64
+     # values, so they should be cast as string.
+     parser = all_parsers
+     data = "\n".join(exp_data)
+     expected = DataFrame(exp_data)
+
+     result = parser.read_csv(StringIO(data), header=None)
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_integer_precision(all_parsers):
+     # GH 7072
+     s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765
+ 5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389"""
+     parser = all_parsers
+     result = parser.read_csv(StringIO(s), header=None)[4]
+     expected = Series([4321583677327450765, 4321113141090630389], name=4)
+     tm.assert_series_equal(result, expected)
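+ # These markers apply the pyarrow_xfail/pyarrow_skip fixtures from this
+ # directory's conftest.py, which xfail or skip a test on the pyarrow engine.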
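+         # (UINT64_MAX == 2**64 - 1 == 18446744073709551615 is a 20-digit
+         # number; the IDs above are 23 digits once leading zeros are dropped.)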
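+     # (2**63 exceeds INT64_MAX == 2**63 - 1, while -1 is not representable
+     # as uint64, so object dtype holding strings is the only safe result.)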
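+     # The 19-digit values in column 4 fit in int64 but exceed float64's
+     # 53-bit mantissa (~9.007e15), so parsing via float would round them.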
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/common/test_verbose.py ADDED
@@ -0,0 +1,81 @@
+ """
+ Tests that work on both the Python and C engines but do not have a
+ specific classification into the other test modules.
+ """
+ from io import StringIO
+
+ import pytest
+
+ import pandas._testing as tm
+
+ depr_msg = "The 'verbose' keyword in pd.read_csv is deprecated"
+
+
+ def test_verbose_read(all_parsers, capsys):
+     parser = all_parsers
+     data = """a,b,c,d
+ one,1,2,3
+ one,1,2,3
+ ,1,2,3
+ one,1,2,3
+ ,1,2,3
+ ,1,2,3
+ one,1,2,3
+ two,1,2,3"""
+
+     if parser.engine == "pyarrow":
+         msg = "The 'verbose' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             with tm.assert_produces_warning(
+                 FutureWarning, match=depr_msg, check_stacklevel=False
+             ):
+                 parser.read_csv(StringIO(data), verbose=True)
+         return
+
+     # Engines are verbose in different ways.
+     with tm.assert_produces_warning(
+         FutureWarning, match=depr_msg, check_stacklevel=False
+     ):
+         parser.read_csv(StringIO(data), verbose=True)
+     captured = capsys.readouterr()
+
+     if parser.engine == "c":
+         assert "Tokenization took:" in captured.out
+         assert "Parser memory cleanup took:" in captured.out
+     else:  # Python engine
+         assert captured.out == "Filled 3 NA values in column a\n"
+
+
+ def test_verbose_read2(all_parsers, capsys):
+     parser = all_parsers
+     data = """a,b,c,d
+ one,1,2,3
+ two,1,2,3
+ three,1,2,3
+ four,1,2,3
+ five,1,2,3
+ ,1,2,3
+ seven,1,2,3
+ eight,1,2,3"""
+
+     if parser.engine == "pyarrow":
+         msg = "The 'verbose' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             with tm.assert_produces_warning(
+                 FutureWarning, match=depr_msg, check_stacklevel=False
+             ):
+                 parser.read_csv(StringIO(data), verbose=True, index_col=0)
+         return
+
+     with tm.assert_produces_warning(
+         FutureWarning, match=depr_msg, check_stacklevel=False
+     ):
+         parser.read_csv(StringIO(data), verbose=True, index_col=0)
+     captured = capsys.readouterr()
+
+     # Engines are verbose in different ways.
+     if parser.engine == "c":
+         assert "Tokenization took:" in captured.out
+         assert "Parser memory cleanup took:" in captured.out
+     else:  # Python engine
+         assert captured.out == "Filled 1 NA values in column a\n"
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/conftest.py ADDED
@@ -0,0 +1,319 @@
+ from __future__ import annotations
+
+ import os
+
+ import pytest
+
+ from pandas.compat._optional import VERSIONS
+
+ from pandas import (
+     read_csv,
+     read_table,
+ )
+ import pandas._testing as tm
+
+
+ class BaseParser:
+     engine: str | None = None
+     low_memory = True
+     float_precision_choices: list[str | None] = []
+
+     def update_kwargs(self, kwargs):
+         kwargs = kwargs.copy()
+         kwargs.update({"engine": self.engine, "low_memory": self.low_memory})
+
+         return kwargs
+
+     def read_csv(self, *args, **kwargs):
+         kwargs = self.update_kwargs(kwargs)
+         return read_csv(*args, **kwargs)
+
+     def read_csv_check_warnings(
+         self,
+         warn_type: type[Warning],
+         warn_msg: str,
+         *args,
+         raise_on_extra_warnings=True,
+         check_stacklevel: bool = True,
+         **kwargs,
+     ):
+         # We need to check the stacklevel here instead of in the tests
+         # since this is where read_csv is called and where the warning
+         # should point to.
+         kwargs = self.update_kwargs(kwargs)
+         with tm.assert_produces_warning(
+             warn_type,
+             match=warn_msg,
+             raise_on_extra_warnings=raise_on_extra_warnings,
+             check_stacklevel=check_stacklevel,
+         ):
+             return read_csv(*args, **kwargs)
+
+     def read_table(self, *args, **kwargs):
+         kwargs = self.update_kwargs(kwargs)
+         return read_table(*args, **kwargs)
+
+     def read_table_check_warnings(
+         self,
+         warn_type: type[Warning],
+         warn_msg: str,
+         *args,
+         raise_on_extra_warnings=True,
+         **kwargs,
+     ):
+         # We need to check the stacklevel here instead of in the tests
+         # since this is where read_table is called and where the warning
+         # should point to.
+         kwargs = self.update_kwargs(kwargs)
+         with tm.assert_produces_warning(
+             warn_type, match=warn_msg, raise_on_extra_warnings=raise_on_extra_warnings
+         ):
+             return read_table(*args, **kwargs)
+
+
+ class CParser(BaseParser):
+     engine = "c"
+     float_precision_choices = [None, "high", "round_trip"]
+
+
+ class CParserHighMemory(CParser):
+     low_memory = False
+
+
+ class CParserLowMemory(CParser):
+     low_memory = True
+
+
+ class PythonParser(BaseParser):
+     engine = "python"
+     float_precision_choices = [None]
+
+
+ class PyArrowParser(BaseParser):
+     engine = "pyarrow"
+     float_precision_choices = [None]
+
+
+ @pytest.fixture
+ def csv_dir_path(datapath):
+     """
+     The directory path to the data files needed for parser tests.
+     """
+     return datapath("io", "parser", "data")
+
+
+ @pytest.fixture
+ def csv1(datapath):
+     """
+     The path to the data file "test1.csv" needed for parser tests.
+     """
+     return os.path.join(datapath("io", "data", "csv"), "test1.csv")
+
+
+ _cParserHighMemory = CParserHighMemory
+ _cParserLowMemory = CParserLowMemory
+ _pythonParser = PythonParser
+ _pyarrowParser = PyArrowParser
+
+ _py_parsers_only = [_pythonParser]
+ _c_parsers_only = [_cParserHighMemory, _cParserLowMemory]
+ _pyarrow_parsers_only = [pytest.param(_pyarrowParser, marks=pytest.mark.single_cpu)]
+
+ _all_parsers = [*_c_parsers_only, *_py_parsers_only, *_pyarrow_parsers_only]
+
+ _py_parser_ids = ["python"]
+ _c_parser_ids = ["c_high", "c_low"]
+ _pyarrow_parsers_ids = ["pyarrow"]
+
+ _all_parser_ids = [*_c_parser_ids, *_py_parser_ids, *_pyarrow_parsers_ids]
+
+
+ @pytest.fixture(params=_all_parsers, ids=_all_parser_ids)
+ def all_parsers(request):
+     """
+     Fixture for all of the CSV parsers.
+     """
+     parser = request.param()
+     if parser.engine == "pyarrow":
+         pytest.importorskip("pyarrow", VERSIONS["pyarrow"])
+         # Try finding a way to disable threads altogether
+         # for more stable CI runs
+         import pyarrow
+
+         pyarrow.set_cpu_count(1)
+     return parser
+
+
+ @pytest.fixture(params=_c_parsers_only, ids=_c_parser_ids)
+ def c_parser_only(request):
+     """
+     Fixture for all of the CSV parsers using the C engine.
+     """
+     return request.param()
+
+
+ @pytest.fixture(params=_py_parsers_only, ids=_py_parser_ids)
+ def python_parser_only(request):
+     """
+     Fixture for all of the CSV parsers using the Python engine.
+     """
+     return request.param()
+
+
+ @pytest.fixture(params=_pyarrow_parsers_only, ids=_pyarrow_parsers_ids)
+ def pyarrow_parser_only(request):
+     """
+     Fixture for all of the CSV parsers using the pyarrow engine.
+     """
+     return request.param()
+
+
+ def _get_all_parser_float_precision_combinations():
+     """
+     Return all allowable parser and float precision
+     combinations and corresponding ids.
+     """
+     params = []
+     ids = []
+     for parser, parser_id in zip(_all_parsers, _all_parser_ids):
+         if hasattr(parser, "values"):
+             # Wrapped in pytest.param, get the actual parser back
+             parser = parser.values[0]
+         for precision in parser.float_precision_choices:
+             # Re-wrap in pytest.param for pyarrow
+             mark = pytest.mark.single_cpu if parser.engine == "pyarrow" else ()
+             param = pytest.param((parser(), precision), marks=mark)
+             params.append(param)
+             ids.append(f"{parser_id}-{precision}")
+
+     return {"params": params, "ids": ids}
+
+
+ @pytest.fixture(
+     params=_get_all_parser_float_precision_combinations()["params"],
+     ids=_get_all_parser_float_precision_combinations()["ids"],
+ )
+ def all_parsers_all_precisions(request):
+     """
+     Fixture for all allowable combinations of parser
+     and float precision.
+     """
+     return request.param
+
+
+ _utf_values = [8, 16, 32]
+
+ _encoding_seps = ["", "-", "_"]
+ _encoding_prefixes = ["utf", "UTF"]
+
+ _encoding_fmts = [
+     f"{prefix}{sep}{{0}}" for sep in _encoding_seps for prefix in _encoding_prefixes
+ ]
+
+
+ @pytest.fixture(params=_utf_values)
+ def utf_value(request):
+     """
+     Fixture for all possible integer values for a UTF encoding.
+     """
+     return request.param
+
+
+ @pytest.fixture(params=_encoding_fmts)
+ def encoding_fmt(request):
+     """
+     Fixture for all possible string formats of a UTF encoding.
+     """
+     return request.param
+
+
+ @pytest.fixture(
+     params=[
+         ("-1,0", -1.0),
+         ("-1,2e0", -1.2),
+         ("-1e0", -1.0),
+         ("+1e0", 1.0),
+         ("+1e+0", 1.0),
+         ("+1e-1", 0.1),
+         ("+,1e1", 1.0),
+         ("+1,e0", 1.0),
+         ("-,1e1", -1.0),
+         ("-1,e0", -1.0),
+         ("0,1", 0.1),
+         ("1,", 1.0),
+         (",1", 0.1),
+         ("-,1", -0.1),
+         ("1_,", 1.0),
+         ("1_234,56", 1234.56),
+         ("1_234,56e0", 1234.56),
+         # negative cases; must not parse as float
+         ("_", "_"),
+         ("-_", "-_"),
+         ("-_1", "-_1"),
+         ("-_1e0", "-_1e0"),
+         ("_1", "_1"),
+         ("_1,", "_1,"),
+         ("_1,_", "_1,_"),
+         ("_1e0", "_1e0"),
+         ("1,2e_1", "1,2e_1"),
+         ("1,2e1_0", "1,2e1_0"),
+         ("1,_2", "1,_2"),
+         (",1__2", ",1__2"),
+         (",1e", ",1e"),
+         ("-,1e", "-,1e"),
+         ("1_000,000_000", "1_000,000_000"),
+         ("1,e1_2", "1,e1_2"),
+         ("e11,2", "e11,2"),
+         ("1e11,2", "1e11,2"),
+         ("1,2,2", "1,2,2"),
+         ("1,2_1", "1,2_1"),
+         ("1,2e-10e1", "1,2e-10e1"),
+         ("--1,2", "--1,2"),
+         ("1a_2,1", "1a_2,1"),
+         ("1,2E-1", 0.12),
+         ("1,2E1", 12.0),
+     ]
+ )
+ def numeric_decimal(request):
+     """
+     Fixture for all numeric formats which should get recognized. The first entry
+     represents the value to read while the second represents the expected result.
+     """
+     return request.param
+
+
+ @pytest.fixture
+ def pyarrow_xfail(request):
+     """
+     Fixture that xfails a test if the engine is pyarrow.
+
+     Use if the failure is due to unsupported keywords or inconsistent results.
+     """
+     if "all_parsers" in request.fixturenames:
+         parser = request.getfixturevalue("all_parsers")
+     elif "all_parsers_all_precisions" in request.fixturenames:
+         # Return value is tuple of (engine, precision)
+         parser = request.getfixturevalue("all_parsers_all_precisions")[0]
+     else:
+         return
+     if parser.engine == "pyarrow":
+         mark = pytest.mark.xfail(reason="pyarrow doesn't support this.")
+         request.applymarker(mark)
+
+
+ @pytest.fixture
+ def pyarrow_skip(request):
+     """
+     Fixture that skips a test if the engine is pyarrow.
+
+     Use if the failure is due to a parsing failure from pyarrow.csv.read_csv.
+     """
+     if "all_parsers" in request.fixturenames:
+         parser = request.getfixturevalue("all_parsers")
+     elif "all_parsers_all_precisions" in request.fixturenames:
+         # Return value is tuple of (engine, precision)
+         parser = request.getfixturevalue("all_parsers_all_precisions")[0]
+     else:
+         return
+     if parser.engine == "pyarrow":
+         pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
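+ # Expands to: "utf{0}", "UTF{0}", "utf-{0}", "UTF-{0}", "utf_{0}", "UTF_{0}"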
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_c_parser_only.py ADDED
@@ -0,0 +1,643 @@
+ """
+ Tests that apply specifically to the CParser. Unless specifically stated
+ as a CParser-specific issue, the goal is to eventually move as many of
+ these tests as possible out of this module once the Python parser can
+ accept further arguments when parsing.
+ """
+ from decimal import Decimal
+ from io import (
+     BytesIO,
+     StringIO,
+     TextIOWrapper,
+ )
+ import mmap
+ import os
+ import tarfile
+
+ import numpy as np
+ import pytest
+
+ from pandas.compat.numpy import np_version_gte1p24
+ from pandas.errors import (
+     ParserError,
+     ParserWarning,
+ )
+ import pandas.util._test_decorators as td
+
+ from pandas import (
+     DataFrame,
+     concat,
+ )
+ import pandas._testing as tm
+
+
+ @pytest.mark.parametrize(
+     "malformed",
+     ["1\r1\r1\r 1\r 1\r", "1\r1\r1\r 1\r 1\r11\r", "1\r1\r1\r 1\r 1\r11\r1\r"],
+     ids=["words pointer", "stream pointer", "lines pointer"],
+ )
+ def test_buffer_overflow(c_parser_only, malformed):
+     # see gh-9205: test certain malformed input files that cause
+     # buffer overflows in tokenizer.c
+     msg = "Buffer overflow caught - possible malformed input file."
+     parser = c_parser_only
+
+     with pytest.raises(ParserError, match=msg):
+         parser.read_csv(StringIO(malformed))
+
+
+ def test_delim_whitespace_custom_terminator(c_parser_only):
+     # See gh-12912
+     data = "a b c~1 2 3~4 5 6~7 8 9"
+     parser = c_parser_only
+
+     depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+     with tm.assert_produces_warning(
+         FutureWarning, match=depr_msg, check_stacklevel=False
+     ):
+         df = parser.read_csv(StringIO(data), lineterminator="~", delim_whitespace=True)
+     expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
+     tm.assert_frame_equal(df, expected)
+
+
+ def test_dtype_and_names_error(c_parser_only):
+     # see gh-8833: passing both dtype and names
+     # resulting in an error reporting issue
+     parser = c_parser_only
+     data = """
+ 1.0 1
+ 2.0 2
+ 3.0 3
+ """
+     # base cases
+     result = parser.read_csv(StringIO(data), sep=r"\s+", header=None)
+     expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
+     tm.assert_frame_equal(result, expected)
+
+     result = parser.read_csv(StringIO(data), sep=r"\s+", header=None, names=["a", "b"])
+     expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["a", "b"])
+     tm.assert_frame_equal(result, expected)
+
+     # fallback casting
+     result = parser.read_csv(
+         StringIO(data), sep=r"\s+", header=None, names=["a", "b"], dtype={"a": np.int32}
+     )
+     expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=["a", "b"])
+     expected["a"] = expected["a"].astype(np.int32)
+     tm.assert_frame_equal(result, expected)
+
+     data = """
+ 1.0 1
+ nan 2
+ 3.0 3
+ """
+     # fallback casting, but not castable
+     warning = RuntimeWarning if np_version_gte1p24 else None
+     with pytest.raises(ValueError, match="cannot safely convert"):
+         with tm.assert_produces_warning(warning, check_stacklevel=False):
+             parser.read_csv(
+                 StringIO(data),
+                 sep=r"\s+",
+                 header=None,
+                 names=["a", "b"],
+                 dtype={"a": np.int32},
+             )
+
+
+ @pytest.mark.parametrize(
+     "match,kwargs",
+     [
+         # For each of these cases, all of the dtypes are valid, just unsupported.
+         (
+             (
+                 "the dtype datetime64 is not supported for parsing, "
+                 "pass this column using parse_dates instead"
+             ),
+             {"dtype": {"A": "datetime64", "B": "float64"}},
+         ),
+         (
+             (
+                 "the dtype datetime64 is not supported for parsing, "
+                 "pass this column using parse_dates instead"
+             ),
+             {"dtype": {"A": "datetime64", "B": "float64"}, "parse_dates": ["B"]},
+         ),
+         (
+             "the dtype timedelta64 is not supported for parsing",
+             {"dtype": {"A": "timedelta64", "B": "float64"}},
+         ),
+         (
+             f"the dtype {tm.ENDIAN}U8 is not supported for parsing",
+             {"dtype": {"A": "U8"}},
+         ),
+     ],
+     ids=["dt64-0", "dt64-1", "td64", f"{tm.ENDIAN}U8"],
+ )
+ def test_unsupported_dtype(c_parser_only, match, kwargs):
+     parser = c_parser_only
+     df = DataFrame(
+         np.random.default_rng(2).random((5, 2)),
+         columns=list("AB"),
+         index=["1A", "1B", "1C", "1D", "1E"],
+     )
+
+     with tm.ensure_clean("__unsupported_dtype__.csv") as path:
+         df.to_csv(path)
+
+         with pytest.raises(TypeError, match=match):
+             parser.read_csv(path, index_col=0, **kwargs)
+
+
+ @td.skip_if_32bit
+ @pytest.mark.slow
+ # test numbers between 1 and 2
+ @pytest.mark.parametrize("num", np.linspace(1.0, 2.0, num=21))
+ def test_precise_conversion(c_parser_only, num):
+     parser = c_parser_only
+
+     normal_errors = []
+     precise_errors = []
+
+     def error(val: float, actual_val: Decimal) -> Decimal:
+         return abs(Decimal(f"{val:.100}") - actual_val)
+
+     # 25 decimal digits of precision
+     text = f"a\n{num:.25}"
+
+     normal_val = float(
+         parser.read_csv(StringIO(text), float_precision="legacy")["a"][0]
+     )
+     precise_val = float(parser.read_csv(StringIO(text), float_precision="high")["a"][0])
+     roundtrip_val = float(
+         parser.read_csv(StringIO(text), float_precision="round_trip")["a"][0]
+     )
+     actual_val = Decimal(text[2:])
+
+     normal_errors.append(error(normal_val, actual_val))
+     precise_errors.append(error(precise_val, actual_val))
+
+     # round-trip should match float()
+     assert roundtrip_val == float(text[2:])
+
+     assert sum(precise_errors) <= sum(normal_errors)
+     assert max(precise_errors) <= max(normal_errors)
+
+
+ def test_usecols_dtypes(c_parser_only):
+     parser = c_parser_only
+     data = """\
+ 1,2,3
+ 4,5,6
+ 7,8,9
+ 10,11,12"""
+
+     result = parser.read_csv(
+         StringIO(data),
+         usecols=(0, 1, 2),
+         names=("a", "b", "c"),
+         header=None,
+         converters={"a": str},
+         dtype={"b": int, "c": float},
+     )
+     result2 = parser.read_csv(
+         StringIO(data),
+         usecols=(0, 2),
+         names=("a", "b", "c"),
+         header=None,
+         converters={"a": str},
+         dtype={"b": int, "c": float},
+     )
+
+     assert (result.dtypes == [object, int, float]).all()
+     assert (result2.dtypes == [object, float]).all()
+
+
+ def test_disable_bool_parsing(c_parser_only):
+     # see gh-2090
+
+     parser = c_parser_only
+     data = """A,B,C
+ Yes,No,Yes
+ No,Yes,Yes
+ Yes,,Yes
+ No,No,No"""
+
+     result = parser.read_csv(StringIO(data), dtype=object)
+     assert (result.dtypes == object).all()
+
+     result = parser.read_csv(StringIO(data), dtype=object, na_filter=False)
+     assert result["B"][2] == ""
+
+
+ def test_custom_lineterminator(c_parser_only):
+     parser = c_parser_only
+     data = "a,b,c~1,2,3~4,5,6"
+
+     result = parser.read_csv(StringIO(data), lineterminator="~")
+     expected = parser.read_csv(StringIO(data.replace("~", "\n")))
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_parse_ragged_csv(c_parser_only):
+     parser = c_parser_only
+     data = """1,2,3
+ 1,2,3,4
+ 1,2,3,4,5
+ 1,2
+ 1,2,3,4"""
+
+     nice_data = """1,2,3,,
+ 1,2,3,4,
+ 1,2,3,4,5
+ 1,2,,,
+ 1,2,3,4,"""
+     result = parser.read_csv(
+         StringIO(data), header=None, names=["a", "b", "c", "d", "e"]
+     )
+
+     expected = parser.read_csv(
+         StringIO(nice_data), header=None, names=["a", "b", "c", "d", "e"]
+     )
+
+     tm.assert_frame_equal(result, expected)
+
+     # too many columns, cause segfault if not careful
+     data = "1,2\n3,4,5"
+
+     result = parser.read_csv(StringIO(data), header=None, names=range(50))
+     expected = parser.read_csv(StringIO(data), header=None, names=range(3)).reindex(
+         columns=range(50)
+     )
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_tokenize_CR_with_quoting(c_parser_only):
+     # see gh-3453
+     parser = c_parser_only
+     data = ' a,b,c\r"a,b","e,d","f,f"'
+
+     result = parser.read_csv(StringIO(data), header=None)
+     expected = parser.read_csv(StringIO(data.replace("\r", "\n")), header=None)
+     tm.assert_frame_equal(result, expected)
+
+     result = parser.read_csv(StringIO(data))
+     expected = parser.read_csv(StringIO(data.replace("\r", "\n")))
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize("count", [3 * 2**n for n in range(6)])
+ def test_grow_boundary_at_cap(c_parser_only, count):
+     # See gh-12494
+     #
+     # Cause of error was that the C parser
+     # was not increasing the buffer size when
+     # the desired space would fill the buffer
+     # to capacity, which would later cause a
+     # buffer overflow error when checking the
+     # EOF terminator of the CSV stream.
+     # 3 * 2^n commas was observed to break the parser
+     parser = c_parser_only
+
+     with StringIO("," * count) as s:
+         expected = DataFrame(columns=[f"Unnamed: {i}" for i in range(count + 1)])
+         df = parser.read_csv(s)
+     tm.assert_frame_equal(df, expected)
+
+
+ @pytest.mark.slow
+ @pytest.mark.parametrize("encoding", [None, "utf-8"])
+ def test_parse_trim_buffers(c_parser_only, encoding):
+     # This test is part of a bugfix for gh-13703. It attempts
+     # to stress the system memory allocator, to cause it to move the
+     # stream buffer and either let the OS reclaim the region, or let
+     # other memory requests of parser otherwise modify the contents
+     # of memory space, where it was formerly located.
+     # This test is designed to cause a `segfault` with unpatched
+     # `tokenizer.c`. Sometimes the test fails on `segfault`, other
+     # times it fails due to memory corruption, which causes the
+     # loaded DataFrame to differ from the expected one.
+
+     # Also force 'utf-8' encoding, so that `_string_convert` would take
+     # a different execution branch.
+
+     parser = c_parser_only
+
+     # Generate a large mixed-type CSV file on-the-fly (one record is
+     # approx 1.5KiB).
+     record_ = (
+         """9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z"""
+         """ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,"""
+         """ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9"""
+         """99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,"""
+         """9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9."""
+         """99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999."""
+         """99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ"""
+         """ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ"""
+         """ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z"""
+         """ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,"""
+         """9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,"""
+         """999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,"""
+         """,,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999"""
+         """,9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9."""
+         """999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,"""
+         """,9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z"""
+         """ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ"""
+         """,999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99"""
+         """,,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-"""
+         """9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9"""
+         """.99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,"""
+         """,,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9."""
+         """99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ"""
+         """ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ"""
+         """-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ"""
+         """ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ"""
+         """,9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99"""
+         """,99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9"""
+         """.99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,"""
+     )
+
+     # Set the number of lines so that a call to `parser_trim_buffers`
+     # is triggered: after a couple of full chunks are consumed a
+     # relatively small 'residual' chunk would cause reallocation
+     # within the parser.
+     chunksize, n_lines = 128, 2 * 128 + 15
+     csv_data = "\n".join([record_] * n_lines) + "\n"
+
+     # We will use StringIO to load the CSV from this text buffer.
+     # pd.read_csv() will iterate over the file in chunks and will
+     # finally read a residual chunk of really small size.
+
+     # Generate the expected output: manually create the dataframe
+     # by splitting by comma and repeating the `n_lines` times.
+     row = tuple(val_ if val_ else np.nan for val_ in record_.split(","))
+     expected = DataFrame(
+         [row for _ in range(n_lines)], dtype=object, columns=None, index=None
+     )
+
+     # Iterate over the CSV file in chunks of `chunksize` lines
+     with parser.read_csv(
+         StringIO(csv_data),
+         header=None,
+         dtype=object,
+         chunksize=chunksize,
+         encoding=encoding,
+     ) as chunks_:
+         result = concat(chunks_, axis=0, ignore_index=True)
+
+     # Check for data corruption if there was no segfault
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_internal_null_byte(c_parser_only):
+     # see gh-14012
+     #
+     # The null byte ('\x00') should not be used as a
+     # true line terminator, escape character, or comment
+     # character, only as a placeholder to indicate that
+     # none was specified.
+     #
+     # This test should be moved to test_common.py ONLY when
+     # Python's csv class supports parsing '\x00'.
+     parser = c_parser_only
+
+     names = ["a", "b", "c"]
+     data = "1,2,3\n4,\x00,6\n7,8,9"
+     expected = DataFrame([[1, 2.0, 3], [4, np.nan, 6], [7, 8, 9]], columns=names)
+
+     result = parser.read_csv(StringIO(data), names=names)
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_read_nrows_large(c_parser_only):
+     # gh-7626 - Read only nrows of data in for large inputs (>262144b)
+     parser = c_parser_only
+     header_narrow = "\t".join(["COL_HEADER_" + str(i) for i in range(10)]) + "\n"
+     data_narrow = "\t".join(["somedatasomedatasomedata1" for _ in range(10)]) + "\n"
+     header_wide = "\t".join(["COL_HEADER_" + str(i) for i in range(15)]) + "\n"
+     data_wide = "\t".join(["somedatasomedatasomedata2" for _ in range(15)]) + "\n"
+     test_input = header_narrow + data_narrow * 1050 + header_wide + data_wide * 2
+
+     df = parser.read_csv(StringIO(test_input), sep="\t", nrows=1010)
+
+     assert df.size == 1010 * 10
+
+
+ def test_float_precision_round_trip_with_text(c_parser_only):
+     # see gh-15140
+     parser = c_parser_only
+     df = parser.read_csv(StringIO("a"), header=None, float_precision="round_trip")
+     tm.assert_frame_equal(df, DataFrame({0: ["a"]}))
+
+
+ def test_large_difference_in_columns(c_parser_only):
+     # see gh-14125
+     parser = c_parser_only
+
+     count = 10000
+     large_row = ("X," * count)[:-1] + "\n"
+     normal_row = "XXXXXX XXXXXX,111111111111111\n"
+     test_input = (large_row + normal_row * 6)[:-1]
+
+     result = parser.read_csv(StringIO(test_input), header=None, usecols=[0])
+     rows = test_input.split("\n")
+
+     expected = DataFrame([row.split(",")[0] for row in rows])
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_data_after_quote(c_parser_only):
+     # see gh-15910
+     parser = c_parser_only
+
+     data = 'a\n1\n"b"a'
+     result = parser.read_csv(StringIO(data))
+
+     expected = DataFrame({"a": ["1", "ba"]})
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_comment_whitespace_delimited(c_parser_only):
+     parser = c_parser_only
+     test_input = """\
+ 1 2
+ 2 2 3
+ 3 2 3 # 3 fields
+ 4 2 3# 3 fields
+ 5 2 # 2 fields
+ 6 2# 2 fields
+ 7 # 1 field, NaN
+ 8# 1 field, NaN
+ 9 2 3 # skipped line
+ # comment"""
+     with tm.assert_produces_warning(
+         ParserWarning, match="Skipping line", check_stacklevel=False
+     ):
+         df = parser.read_csv(
+             StringIO(test_input),
+             comment="#",
+             header=None,
+             delimiter="\\s+",
+             skiprows=0,
+             on_bad_lines="warn",
+         )
+     expected = DataFrame([[1, 2], [5, 2], [6, 2], [7, np.nan], [8, np.nan]])
+     tm.assert_frame_equal(df, expected)
+
+
+ def test_file_like_no_next(c_parser_only):
+     # gh-16530: the file-like need not have a "next" or "__next__"
+     # attribute despite having an "__iter__" attribute.
+     #
+     # NOTE: This is only true for the C engine, not Python engine.
+     class NoNextBuffer(StringIO):
+         def __next__(self):
+             raise AttributeError("No next method")
+
+         next = __next__
+
+     parser = c_parser_only
+     data = "a\n1"
+
+     expected = DataFrame({"a": [1]})
+     result = parser.read_csv(NoNextBuffer(data))
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_buffer_rd_bytes_bad_unicode(c_parser_only):
+     # see gh-22748
+     t = BytesIO(b"\xB0")
+     t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape")
+     msg = "'utf-8' codec can't encode character"
+     with pytest.raises(UnicodeError, match=msg):
+         c_parser_only.read_csv(t, encoding="UTF-8")
+
+
+ @pytest.mark.parametrize("tar_suffix", [".tar", ".tar.gz"])
+ def test_read_tarfile(c_parser_only, csv_dir_path, tar_suffix):
+     # see gh-16530
+     #
+     # Unfortunately, Python's CSV library can't handle
+     # tarfile objects (expects string, not bytes when
+     # iterating through a file-like).
+     parser = c_parser_only
+     tar_path = os.path.join(csv_dir_path, "tar_csv" + tar_suffix)
+
+     with tarfile.open(tar_path, "r") as tar:
+         data_file = tar.extractfile("tar_data.csv")
+
+         out = parser.read_csv(data_file)
+         expected = DataFrame({"a": [1]})
+         tm.assert_frame_equal(out, expected)
+
+
+ def test_chunk_whitespace_on_boundary(c_parser_only):
+     # see gh-9735: this issue is C parser-specific (bug when
+     # parsing whitespace and characters at chunk boundary)
+     #
+     # This test case has a field too large for the Python parser / CSV library.
+     parser = c_parser_only
+
+     chunk1 = "a" * (1024 * 256 - 2) + "\na"
+     chunk2 = "\n a"
+     result = parser.read_csv(StringIO(chunk1 + chunk2), header=None)
+
+     expected = DataFrame(["a" * (1024 * 256 - 2), "a", " a"])
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_file_handles_mmap(c_parser_only, csv1):
+     # gh-14418
+     #
+     # Don't close user provided file handles.
+     parser = c_parser_only
+
+     with open(csv1, encoding="utf-8") as f:
+         with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
+             parser.read_csv(m)
+             assert not m.closed
+
+
+ def test_file_binary_mode(c_parser_only):
+     # see gh-23779
+     parser = c_parser_only
+     expected = DataFrame([[1, 2, 3], [4, 5, 6]])
+
+     with tm.ensure_clean() as path:
+         with open(path, "w", encoding="utf-8") as f:
+             f.write("1,2,3\n4,5,6")
+
+         with open(path, "rb") as f:
+             result = parser.read_csv(f, header=None)
+             tm.assert_frame_equal(result, expected)
+
+
+ def test_unix_style_breaks(c_parser_only):
+     # GH 11020
+     parser = c_parser_only
+     with tm.ensure_clean() as path:
+         with open(path, "w", newline="\n", encoding="utf-8") as f:
+             f.write("blah\n\ncol_1,col_2,col_3\n\n")
+         result = parser.read_csv(path, skiprows=2, encoding="utf-8", engine="c")
+     expected = DataFrame(columns=["col_1", "col_2", "col_3"])
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])
+ @pytest.mark.parametrize(
+     "data,thousands,decimal",
+     [
+         (
+             """A|B|C
+ 1|2,334.01|5
+ 10|13|10.
+ """,
+             ",",
+             ".",
+         ),
+         (
+             """A|B|C
+ 1|2.334,01|5
+ 10|13|10,
+ """,
+             ".",
+             ",",
+         ),
+     ],
+ )
+ def test_1000_sep_with_decimal(
+     c_parser_only, data, thousands, decimal, float_precision
+ ):
+     parser = c_parser_only
+     expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})
+
+     result = parser.read_csv(
+         StringIO(data),
+         sep="|",
+         thousands=thousands,
+         decimal=decimal,
+         float_precision=float_precision,
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_float_precision_options(c_parser_only):
+     # GH 17154, 36228
+     parser = c_parser_only
+     s = "foo\n243.164\n"
+     df = parser.read_csv(StringIO(s))
+     df2 = parser.read_csv(StringIO(s), float_precision="high")
+
+     tm.assert_frame_equal(df, df2)
+
+     df3 = parser.read_csv(StringIO(s), float_precision="legacy")
+
+     assert not df.iloc[0, 0] == df3.iloc[0, 0]
+
+     msg = "Unrecognized float_precision option: junk"
+
+     with pytest.raises(ValueError, match=msg):
+         parser.read_csv(StringIO(s), float_precision="junk")
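+     # (two full 128-line chunks are consumed, then a 15-line residual chunk)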
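+     # The stray "a" after the closing quote is appended to the field,
+     # yielding "ba" in the second row.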
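+     # surrogateescape maps the non-ASCII byte 0xB0 to a lone surrogate,
+     # which cannot be re-encoded as UTF-8, hence the UnicodeError below.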
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_comment.py ADDED
@@ -0,0 +1,227 @@
+ """
+ Tests that comments are properly handled during parsing
+ for all of the parsers defined in parsers.py
+ """
+ from io import StringIO
+
+ import numpy as np
+ import pytest
+
+ from pandas import DataFrame
+ import pandas._testing as tm
+
+
+ @pytest.mark.parametrize("na_values", [None, ["NaN"]])
+ def test_comment(all_parsers, na_values):
+     parser = all_parsers
+     data = """A,B,C
+ 1,2.,4.#hello world
+ 5.,NaN,10.0
+ """
+     expected = DataFrame(
+         [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+     )
+     if parser.engine == "pyarrow":
+         msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), comment="#", na_values=na_values)
+         return
+     result = parser.read_csv(StringIO(data), comment="#", na_values=na_values)
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "read_kwargs", [{}, {"lineterminator": "*"}, {"delim_whitespace": True}]
+ )
+ def test_line_comment(all_parsers, read_kwargs, request):
+     parser = all_parsers
+     data = """# empty
+ A,B,C
+ 1,2.,4.#hello world
+ #ignore this line
+ 5.,NaN,10.0
+ """
+     warn = None
+     depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+
+     if read_kwargs.get("delim_whitespace"):
+         data = data.replace(",", " ")
+         warn = FutureWarning
+     elif read_kwargs.get("lineterminator"):
+         data = data.replace("\n", read_kwargs.get("lineterminator"))
+
+     read_kwargs["comment"] = "#"
+     if parser.engine == "pyarrow":
+         if "lineterminator" in read_kwargs:
+             msg = (
+                 "The 'lineterminator' option is not supported with the 'pyarrow' engine"
+             )
+         else:
+             msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             with tm.assert_produces_warning(
+                 warn, match=depr_msg, check_stacklevel=False
+             ):
+                 parser.read_csv(StringIO(data), **read_kwargs)
+         return
+     elif parser.engine == "python" and read_kwargs.get("lineterminator"):
+         msg = r"Custom line terminators not supported in python parser \(yet\)"
+         with pytest.raises(ValueError, match=msg):
+             with tm.assert_produces_warning(
+                 warn, match=depr_msg, check_stacklevel=False
+             ):
+                 parser.read_csv(StringIO(data), **read_kwargs)
+         return
+
+     with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False):
+         result = parser.read_csv(StringIO(data), **read_kwargs)
+
+     expected = DataFrame(
+         [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_comment_skiprows(all_parsers):
+     parser = all_parsers
+     data = """# empty
+ random line
+ # second empty line
+ 1,2,3
+ A,B,C
+ 1,2.,4.
+ 5.,NaN,10.0
+ """
+     # This should ignore the first four lines (including comments).
+     expected = DataFrame(
+         [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+     )
+     if parser.engine == "pyarrow":
+         msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), comment="#", skiprows=4)
+         return
+
+     result = parser.read_csv(StringIO(data), comment="#", skiprows=4)
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_comment_header(all_parsers):
+     parser = all_parsers
+     data = """# empty
+ # second empty line
+ 1,2,3
+ A,B,C
+ 1,2.,4.
+ 5.,NaN,10.0
+ """
+     # Header should begin at the second non-comment line.
+     expected = DataFrame(
+         [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+     )
+     if parser.engine == "pyarrow":
+         msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), comment="#", header=1)
+         return
+     result = parser.read_csv(StringIO(data), comment="#", header=1)
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_comment_skiprows_header(all_parsers):
+     parser = all_parsers
+     data = """# empty
+ # second empty line
+ # third empty line
+ X,Y,Z
+ 1,2,3
+ A,B,C
+ 1,2.,4.
+ 5.,NaN,10.0
+ """
+     # Skiprows should skip the first 4 lines (including comments),
+     # while header should start from the second non-commented line,
+     # starting with line 5.
+     expected = DataFrame(
+         [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]
+     )
+     if parser.engine == "pyarrow":
+         msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), comment="#", skiprows=4, header=1)
+         return
+
+     result = parser.read_csv(StringIO(data), comment="#", skiprows=4, header=1)
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("comment_char", ["#", "~", "&", "^", "*", "@"])
+ def test_custom_comment_char(all_parsers, comment_char):
+     parser = all_parsers
+     data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
+
+     if parser.engine == "pyarrow":
+         msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(
+                 StringIO(data.replace("#", comment_char)), comment=comment_char
+             )
+         return
+     result = parser.read_csv(
+         StringIO(data.replace("#", comment_char)), comment=comment_char
+     )
+
+     expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "b", "c"])
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("header", ["infer", None])
+ def test_comment_first_line(all_parsers, header):
+     # see gh-4623
+     parser = all_parsers
+     data = "# notes\na,b,c\n# more notes\n1,2,3"
+
+     if header is None:
+         expected = DataFrame({0: ["a", "1"], 1: ["b", "2"], 2: ["c", "3"]})
+     else:
+         expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
+
+     if parser.engine == "pyarrow":
+         msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), comment="#", header=header)
+         return
+     result = parser.read_csv(StringIO(data), comment="#", header=header)
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_comment_char_in_default_value(all_parsers, request):
+     # GH#34002
+     if all_parsers.engine == "c":
+         reason = "see gh-34002: works on the python engine but not the c engine"
+         # NA value containing comment char is interpreted as comment
+         request.applymarker(pytest.mark.xfail(reason=reason, raises=AssertionError))
+     parser = all_parsers
+
+     data = (
+         "# this is a comment\n"
+         "col1,col2,col3,col4\n"
+         "1,2,3,4#inline comment\n"
+         "4,5#,6,10\n"
+         "7,8,#N/A,11\n"
+     )
+     if parser.engine == "pyarrow":
+         msg = "The 'comment' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), comment="#", na_values="#N/A")
+         return
+     result = parser.read_csv(StringIO(data), comment="#", na_values="#N/A")
+     expected = DataFrame(
+         {
+             "col1": [1, 4, 7],
+             "col2": [2, 5, 8],
+             "col3": [3.0, np.nan, np.nan],
+             "col4": [4.0, np.nan, 11.0],
+         }
+     )
+     tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_compression.py ADDED
@@ -0,0 +1,211 @@
+ """
+ Tests compressed data parsing functionality for all
+ of the parsers defined in parsers.py
+ """
+
+ import os
+ from pathlib import Path
+ import tarfile
+ import zipfile
+
+ import pytest
+
+ from pandas import DataFrame
+ import pandas._testing as tm
+
+ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+
+
+ @pytest.fixture(params=[True, False])
+ def buffer(request):
+     return request.param
+
+
+ @pytest.fixture
+ def parser_and_data(all_parsers, csv1):
+     parser = all_parsers
+
+     with open(csv1, "rb") as f:
+         data = f.read()
+     expected = parser.read_csv(csv1)
+
+     return parser, data, expected
+
+
+ @pytest.mark.parametrize("compression", ["zip", "infer", "zip2"])
+ def test_zip(parser_and_data, compression):
+     parser, data, expected = parser_and_data
+
+     with tm.ensure_clean("test_file.zip") as path:
+         with zipfile.ZipFile(path, mode="w") as tmp:
+             tmp.writestr("test_file", data)
+
+         if compression == "zip2":
+             with open(path, "rb") as f:
+                 result = parser.read_csv(f, compression="zip")
+         else:
+             result = parser.read_csv(path, compression=compression)
+
+         tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("compression", ["zip", "infer"])
+ def test_zip_error_multiple_files(parser_and_data, compression):
+     parser, data, expected = parser_and_data
+
+     with tm.ensure_clean("combined_zip.zip") as path:
+         inner_file_names = ["test_file", "second_file"]
+
+         with zipfile.ZipFile(path, mode="w") as tmp:
+             for file_name in inner_file_names:
+                 tmp.writestr(file_name, data)
+
+         with pytest.raises(ValueError, match="Multiple files"):
+             parser.read_csv(path, compression=compression)
+
+
+ def test_zip_error_no_files(parser_and_data):
+     parser, _, _ = parser_and_data
+
+     with tm.ensure_clean() as path:
+         with zipfile.ZipFile(path, mode="w"):
+             pass
+
+         with pytest.raises(ValueError, match="Zero files"):
+             parser.read_csv(path, compression="zip")
+
+
+ def test_zip_error_invalid_zip(parser_and_data):
+     parser, _, _ = parser_and_data
+
+     with tm.ensure_clean() as path:
+         with open(path, "rb") as f:
+             with pytest.raises(zipfile.BadZipFile, match="File is not a zip file"):
+                 parser.read_csv(f, compression="zip")
+
+
+ @pytest.mark.parametrize("filename", [None, "test.{ext}"])
+ def test_compression(
+     request,
+     parser_and_data,
+     compression_only,
+     buffer,
+     filename,
+     compression_to_extension,
+ ):
+     parser, data, expected = parser_and_data
+     compress_type = compression_only
+
+     ext = compression_to_extension[compress_type]
+     filename = filename if filename is None else filename.format(ext=ext)
+
+     if filename and buffer:
+         request.applymarker(
+             pytest.mark.xfail(
+                 reason="Cannot deduce compression from buffer of compressed data."
+             )
+         )
+
+     with tm.ensure_clean(filename=filename) as path:
+         tm.write_to_compressed(compress_type, path, data)
+         compression = "infer" if filename else compress_type
+
+         if buffer:
+             with open(path, "rb") as f:
+                 result = parser.read_csv(f, compression=compression)
+         else:
+             result = parser.read_csv(path, compression=compression)
+
+         tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("ext", [None, "gz", "bz2"])
+ def test_infer_compression(all_parsers, csv1, buffer, ext):
+     # see gh-9770
+     parser = all_parsers
+     kwargs = {"index_col": 0, "parse_dates": True}
+
+     expected = parser.read_csv(csv1, **kwargs)
+     kwargs["compression"] = "infer"
+
+     if buffer:
+         with open(csv1, encoding="utf-8") as f:
+             result = parser.read_csv(f, **kwargs)
+     else:
+         ext = "." + ext if ext else ""
+         result = parser.read_csv(csv1 + ext, **kwargs)
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_compression_utf_encoding(all_parsers, csv_dir_path, utf_value, encoding_fmt):
+     # see gh-18071, gh-24130
+     parser = all_parsers
+     encoding = encoding_fmt.format(utf_value)
+     path = os.path.join(csv_dir_path, f"utf{utf_value}_ex_small.zip")
+
+     result = parser.read_csv(path, encoding=encoding, compression="zip", sep="\t")
+     expected = DataFrame(
+         {
+             "Country": ["Venezuela", "Venezuela"],
+             "Twitter": ["Hugo Chávez Frías", "Henrique Capriles R."],
+         }
+     )
+
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("invalid_compression", ["sfark", "bz3", "zipper"])
+ def test_invalid_compression(all_parsers, invalid_compression):
+     parser = all_parsers
+     compress_kwargs = {"compression": invalid_compression}
+
+     msg = f"Unrecognized compression type: {invalid_compression}"
+
+     with pytest.raises(ValueError, match=msg):
+         parser.read_csv("test_file.zip", **compress_kwargs)
+
+
+ def test_compression_tar_archive(all_parsers, csv_dir_path):
+     parser = all_parsers
+     path = os.path.join(csv_dir_path, "tar_csv.tar.gz")
+     df = parser.read_csv(path)
+     assert list(df.columns) == ["a"]
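+         # "zip2" is a sentinel: pass an already-open binary handle and give
+         # compression="zip" explicitly rather than relying on inference.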
177
+
178
+ def test_ignore_compression_extension(all_parsers):
179
+ parser = all_parsers
180
+ df = DataFrame({"a": [0, 1]})
181
+ with tm.ensure_clean("test.csv") as path_csv:
182
+ with tm.ensure_clean("test.csv.zip") as path_zip:
183
+ # make sure to create un-compressed file with zip extension
184
+ df.to_csv(path_csv, index=False)
185
+ Path(path_zip).write_text(
186
+ Path(path_csv).read_text(encoding="utf-8"), encoding="utf-8"
187
+ )
188
+
189
+ tm.assert_frame_equal(parser.read_csv(path_zip, compression=None), df)
190
+
191
+
192
+ def test_writes_tar_gz(all_parsers):
193
+ parser = all_parsers
194
+ data = DataFrame(
195
+ {
196
+ "Country": ["Venezuela", "Venezuela"],
197
+ "Twitter": ["Hugo Chávez Frías", "Henrique Capriles R."],
198
+ }
199
+ )
200
+ with tm.ensure_clean("test.tar.gz") as tar_path:
201
+ data.to_csv(tar_path, index=False)
202
+
203
+ # test that read_csv infers .tar.gz to gzip:
204
+ tm.assert_frame_equal(parser.read_csv(tar_path), data)
205
+
206
+ # test that file is indeed gzipped:
207
+ with tarfile.open(tar_path, "r:gz") as tar:
208
+ result = parser.read_csv(
209
+ tar.extractfile(tar.getnames()[0]), compression="infer"
210
+ )
211
+ tm.assert_frame_equal(result, data)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_concatenate_chunks.py ADDED
@@ -0,0 +1,36 @@
+ import numpy as np
+ import pytest
+
+ from pandas.errors import DtypeWarning
+
+ import pandas._testing as tm
+ from pandas.core.arrays import ArrowExtensionArray
+
+ from pandas.io.parsers.c_parser_wrapper import _concatenate_chunks
+
+
+ def test_concatenate_chunks_pyarrow():
+     # GH#51876
+     pa = pytest.importorskip("pyarrow")
+     chunks = [
+         {0: ArrowExtensionArray(pa.array([1.5, 2.5]))},
+         {0: ArrowExtensionArray(pa.array([1, 2]))},
+     ]
+     result = _concatenate_chunks(chunks)
+     expected = ArrowExtensionArray(pa.array([1.5, 2.5, 1.0, 2.0]))
+     tm.assert_extension_array_equal(result[0], expected)
+
+
+ def test_concatenate_chunks_pyarrow_strings():
+     # GH#51876
+     pa = pytest.importorskip("pyarrow")
+     chunks = [
+         {0: ArrowExtensionArray(pa.array([1.5, 2.5]))},
+         {0: ArrowExtensionArray(pa.array(["a", "b"]))},
+     ]
+     with tm.assert_produces_warning(DtypeWarning, match="have mixed types"):
+         result = _concatenate_chunks(chunks)
+     expected = np.concatenate(
+         [np.array([1.5, 2.5], dtype=object), np.array(["a", "b"])]
+     )
+     tm.assert_numpy_array_equal(result[0], expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_converters.py ADDED
@@ -0,0 +1,263 @@
+ """
+ Tests column conversion functionality during parsing
+ for all of the parsers defined in parsers.py
+ """
+ from io import StringIO
+
+ from dateutil.parser import parse
+ import numpy as np
+ import pytest
+
+ import pandas as pd
+ from pandas import (
+     DataFrame,
+     Index,
+ )
+ import pandas._testing as tm
+
+
+ def test_converters_type_must_be_dict(all_parsers):
+     parser = all_parsers
+     data = """index,A,B,C,D
+ foo,2,3,4,5
+ """
+     if parser.engine == "pyarrow":
+         msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), converters=0)
+         return
+     with pytest.raises(TypeError, match="Type converters.+"):
+         parser.read_csv(StringIO(data), converters=0)
+
+
+ @pytest.mark.parametrize("column", [3, "D"])
+ @pytest.mark.parametrize(
+     "converter", [parse, lambda x: int(x.split("/")[2])]  # Produce integer.
+ )
+ def test_converters(all_parsers, column, converter):
+     parser = all_parsers
+     data = """A,B,C,D
+ a,1,2,01/01/2009
+ b,3,4,01/02/2009
+ c,4,5,01/03/2009
+ """
+     if parser.engine == "pyarrow":
+         msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), converters={column: converter})
+         return
+
+     result = parser.read_csv(StringIO(data), converters={column: converter})
+
+     expected = parser.read_csv(StringIO(data))
+     expected["D"] = expected["D"].map(converter)
+
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_converters_no_implicit_conv(all_parsers):
+     # see gh-2184
+     parser = all_parsers
+     data = """000102,1.2,A\n001245,2,B"""
+
+     converters = {0: lambda x: x.strip()}
+
+     if parser.engine == "pyarrow":
+         msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), header=None, converters=converters)
+         return
+
+     result = parser.read_csv(StringIO(data), header=None, converters=converters)
+
+     # Column 0 should not be cast to numeric and should remain as object.
+     expected = DataFrame([["000102", 1.2, "A"], ["001245", 2, "B"]])
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_converters_euro_decimal_format(all_parsers):
+     # see gh-583
+     converters = {}
+     parser = all_parsers
+
+     data = """Id;Number1;Number2;Text1;Text2;Number3
+ 1;1521,1541;187101,9543;ABC;poi;4,7387
+ 2;121,12;14897,76;DEF;uyt;0,3773
+ 3;878,158;108013,434;GHI;rez;2,7356"""
+     converters["Number1"] = converters["Number2"] = converters[
+         "Number3"
+     ] = lambda x: float(x.replace(",", "."))
+
+     if parser.engine == "pyarrow":
+         msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), sep=";", converters=converters)
+         return
+
+     result = parser.read_csv(StringIO(data), sep=";", converters=converters)
+     expected = DataFrame(
+         [
+             [1, 1521.1541, 187101.9543, "ABC", "poi", 4.7387],
+             [2, 121.12, 14897.76, "DEF", "uyt", 0.3773],
+             [3, 878.158, 108013.434, "GHI", "rez", 2.7356],
+         ],
+         columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_converters_corner_with_nans(all_parsers):
+     parser = all_parsers
+     data = """id,score,days
+ 1,2,12
+ 2,2-5,
+ 3,,14+
+ 4,6-12,2"""
+
+     # Example converters.
+     def convert_days(x):
+         x = x.strip()
+
+         if not x:
+             return np.nan
+
+         is_plus = x.endswith("+")
+
+         if is_plus:
+             x = int(x[:-1]) + 1
+         else:
+             x = int(x)
+
+         return x
+
+     def convert_days_sentinel(x):
+         x = x.strip()
+
+         if not x:
+             return np.nan
+
+         is_plus = x.endswith("+")
+
+         if is_plus:
+             x = int(x[:-1]) + 1
+         else:
+             x = int(x)
+
+         return x
+
+     def convert_score(x):
+         x = x.strip()
+
+         if not x:
+             return np.nan
+
+         if x.find("-") > 0:
+             val_min, val_max = map(int, x.split("-"))
+             val = 0.5 * (val_min + val_max)
+         else:
+             val = float(x)
+
+         return val
+
+     results = []
+
+     for day_converter in [convert_days, convert_days_sentinel]:
+         if parser.engine == "pyarrow":
+             msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+             with pytest.raises(ValueError, match=msg):
+                 parser.read_csv(
+                     StringIO(data),
+                     converters={"score": convert_score, "days": day_converter},
+                     na_values=["", None],
+                 )
+             continue
+
+         result = parser.read_csv(
+             StringIO(data),
+             converters={"score": convert_score, "days": day_converter},
+             na_values=["", None],
+         )
+         assert pd.isna(result["days"][1])
+         results.append(result)
+
+     if parser.engine != "pyarrow":
+         tm.assert_frame_equal(results[0], results[1])
+
+
+ @pytest.mark.parametrize("conv_f", [lambda x: x, str])
+ def test_converter_index_col_bug(all_parsers, conv_f):
+     # see gh-1835 , GH#40589
+     parser = all_parsers
+     data = "A;B\n1;2\n3;4"
+
+     if parser.engine == "pyarrow":
+         msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(
+                 StringIO(data), sep=";", index_col="A", converters={"A": conv_f}
+             )
+         return
+
+     rs = parser.read_csv(
+         StringIO(data), sep=";", index_col="A", converters={"A": conv_f}
+     )
+
+     xp = DataFrame({"B": [2, 4]}, index=Index(["1", "3"], name="A", dtype="object"))
+     tm.assert_frame_equal(rs, xp)
+
+
+ def test_converter_identity_object(all_parsers):
+     # GH#40589
+     parser = all_parsers
+     data = "A,B\n1,2\n3,4"
+
+     if parser.engine == "pyarrow":
+         msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), converters={"A": lambda x: x})
+         return
+
+     rs = parser.read_csv(StringIO(data), converters={"A": lambda x: x})
+
+     xp = DataFrame({"A": ["1", "3"], "B": [2, 4]})
+     tm.assert_frame_equal(rs, xp)
+
+
+ def test_converter_multi_index(all_parsers):
+     # GH 42446
+     parser = all_parsers
+     data = "A,B,B\nX,Y,Z\n1,2,3"
+
+     if parser.engine == "pyarrow":
+         msg = "The 'converters' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(
+                 StringIO(data),
+                 header=list(range(2)),
+                 converters={
+                     ("A", "X"): np.int32,
+                     ("B", "Y"): np.int32,
+                     ("B", "Z"): np.float32,
+                 },
+             )
+         return
+
+     result = parser.read_csv(
+         StringIO(data),
+         header=list(range(2)),
+         converters={
+             ("A", "X"): np.int32,
+             ("B", "Y"): np.int32,
+             ("B", "Z"): np.float32,
+         },
+     )
+
+     expected = DataFrame(
+         {
+             ("A", "X"): np.int32([1]),
+             ("B", "Y"): np.int32([2]),
+             ("B", "Z"): np.float32([3]),
+         }
+     )
+
+     tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_dialect.py ADDED
@@ -0,0 +1,195 @@
+ """
+ Tests that dialects are properly handled during parsing
+ for all of the parsers defined in parsers.py
+ """
+
+ import csv
+ from io import StringIO
+
+ import pytest
+
+ from pandas.errors import ParserWarning
+
+ from pandas import DataFrame
+ import pandas._testing as tm
+
+ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+
+
+ @pytest.fixture
+ def custom_dialect():
+     dialect_name = "weird"
+     dialect_kwargs = {
+         "doublequote": False,
+         "escapechar": "~",
+         "delimiter": ":",
+         "skipinitialspace": False,
+         "quotechar": "~",
+         "quoting": 3,
+     }
+     return dialect_name, dialect_kwargs
+
+
+ def test_dialect(all_parsers):
+     parser = all_parsers
+     data = """\
+ label1,label2,label3
+ index1,"a,c,e
+ index2,b,d,f
+ """
+
+     dia = csv.excel()
+     dia.quoting = csv.QUOTE_NONE
+
+     if parser.engine == "pyarrow":
+         msg = "The 'dialect' option is not supported with the 'pyarrow' engine"
+         with pytest.raises(ValueError, match=msg):
+             parser.read_csv(StringIO(data), dialect=dia)
+         return
+
+     df = parser.read_csv(StringIO(data), dialect=dia)
+
+     data = """\
+ label1,label2,label3
+ index1,a,c,e
+ index2,b,d,f
+ """
+     exp = parser.read_csv(StringIO(data))
+     exp.replace("a", '"a', inplace=True)
+     tm.assert_frame_equal(df, exp)
+
+
+ def test_dialect_str(all_parsers):
+     dialect_name = "mydialect"
+     parser = all_parsers
+     data = """\
+ fruit:vegetable
+ apple:broccoli
+ pear:tomato
+ """
+     exp = DataFrame({"fruit": ["apple", "pear"], "vegetable": ["broccoli", "tomato"]})
+
+     with tm.with_csv_dialect(dialect_name, delimiter=":"):
+         if parser.engine == "pyarrow":
+             msg = "The 'dialect' option is not supported with the 'pyarrow' engine"
+             with pytest.raises(ValueError, match=msg):
+                 parser.read_csv(StringIO(data), dialect=dialect_name)
+             return
+
+         df = parser.read_csv(StringIO(data), dialect=dialect_name)
+         tm.assert_frame_equal(df, exp)
+
+
+ def test_invalid_dialect(all_parsers):
+     class InvalidDialect:
+         pass
+
+     data = "a\n1"
+     parser = all_parsers
+     msg = "Invalid dialect"
+
+     with pytest.raises(ValueError, match=msg):
+         parser.read_csv(StringIO(data), dialect=InvalidDialect)
+
+
+ @pytest.mark.parametrize(
+     "arg",
+     [None, "doublequote", "escapechar", "skipinitialspace", "quotechar", "quoting"],
+ )
+ @pytest.mark.parametrize("value", ["dialect", "default", "other"])
+ def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, value):
+     # see gh-23761.
+     dialect_name, dialect_kwargs = custom_dialect
+     parser = all_parsers
+
+     expected = DataFrame({"a": [1], "b": [2]})
+     data = "a:b\n1:2"
+
+     warning_klass = None
+     kwds = {}
+
+     # arg=None tests when we pass in the dialect without any other arguments.
+     if arg is not None:
+         if value == "dialect":  # No conflict --> no warning.
+             kwds[arg] = dialect_kwargs[arg]
+         elif value == "default":  # Default --> no warning.
+             from pandas.io.parsers.base_parser import parser_defaults
+
+             kwds[arg] = parser_defaults[arg]
+         else:  # Non-default + conflict with dialect --> warning.
+             warning_klass = ParserWarning
+             kwds[arg] = "blah"
+
+     with tm.with_csv_dialect(dialect_name, **dialect_kwargs):
+         if parser.engine == "pyarrow":
+             msg = "The 'dialect' option is not supported with the 'pyarrow' engine"
+             with pytest.raises(ValueError, match=msg):
+                 parser.read_csv_check_warnings(
+                     # No warning bc we raise
+                     None,
+                     "Conflicting values for",
+                     StringIO(data),
+                     dialect=dialect_name,
+                     **kwds,
+                 )
+             return
+         result = parser.read_csv_check_warnings(
+             warning_klass,
+             "Conflicting values for",
+             StringIO(data),
+             dialect=dialect_name,
+             **kwds,
+         )
+         tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "kwargs,warning_klass",
+     [
+         ({"sep": ","}, None),  # sep is default --> sep_override=True
+         ({"sep": "."}, ParserWarning),  # sep isn't default --> sep_override=False
+         ({"delimiter": ":"}, None),  # No conflict
+         ({"delimiter": None}, None),  # Default arguments --> sep_override=True
+         ({"delimiter": ","}, ParserWarning),  # Conflict
+         ({"delimiter": "."}, ParserWarning),  # Conflict
+     ],
+     ids=[
+         "sep-override-true",
+         "sep-override-false",
+         "delimiter-no-conflict",
+         "delimiter-default-arg",
+         "delimiter-conflict",
+         "delimiter-conflict2",
+     ],
+ )
+ def test_dialect_conflict_delimiter(all_parsers, custom_dialect, kwargs, warning_klass):
+     # see gh-23761.
+     dialect_name, dialect_kwargs = custom_dialect
+     parser = all_parsers
+
+     expected = DataFrame({"a": [1], "b": [2]})
+     data = "a:b\n1:2"
+
+     with tm.with_csv_dialect(dialect_name, **dialect_kwargs):
+         if parser.engine == "pyarrow":
+             msg = "The 'dialect' option is not supported with the 'pyarrow' engine"
+             with pytest.raises(ValueError, match=msg):
+                 parser.read_csv_check_warnings(
+                     # no warning bc we raise
+                     None,
+                     "Conflicting values for 'delimiter'",
+                     StringIO(data),
+                     dialect=dialect_name,
+                     **kwargs,
+                 )
+             return
+         result = parser.read_csv_check_warnings(
+             warning_klass,
+             "Conflicting values for 'delimiter'",
+             StringIO(data),
+             dialect=dialect_name,
+             **kwargs,
+         )
+         tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_encoding.py ADDED
@@ -0,0 +1,337 @@
+ """
+ Tests encoding functionality during parsing
+ for all of the parsers defined in parsers.py
+ """
+ from io import (
+     BytesIO,
+     TextIOWrapper,
+ )
+ import os
+ import tempfile
+ import uuid
+
+ import numpy as np
+ import pytest
+
+ from pandas import (
+     DataFrame,
+     read_csv,
+ )
+ import pandas._testing as tm
+
+ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+
+ skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+ def test_bytes_io_input(all_parsers):
+     encoding = "cp1255"
+     parser = all_parsers
+
+     data = BytesIO("שלום:1234\n562:123".encode(encoding))
+     result = parser.read_csv(data, sep=":", encoding=encoding)
+
+     expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
+     tm.assert_frame_equal(result, expected)
+
+
+ @skip_pyarrow  # CSV parse error: Empty CSV file or block
+ def test_read_csv_unicode(all_parsers):
+     parser = all_parsers
+     data = BytesIO("\u0141aski, Jan;1".encode())
+
+     result = parser.read_csv(data, sep=";", encoding="utf-8", header=None)
+     expected = DataFrame([["\u0141aski, Jan", 1]])
+     tm.assert_frame_equal(result, expected)
+
+
+ @skip_pyarrow
+ @pytest.mark.parametrize("sep", [",", "\t"])
+ @pytest.mark.parametrize("encoding", ["utf-16", "utf-16le", "utf-16be"])
+ def test_utf16_bom_skiprows(all_parsers, sep, encoding):
+     # see gh-2298
+     parser = all_parsers
+     data = """skip this
+ skip this too
+ A,B,C
+ 1,2,3
+ 4,5,6""".replace(
+         ",", sep
+     )
+     path = f"__{uuid.uuid4()}__.csv"
+     kwargs = {"sep": sep, "skiprows": 2}
+     utf8 = "utf-8"
+
+     with tm.ensure_clean(path) as path:
+         bytes_data = data.encode(encoding)
+
+         with open(path, "wb") as f:
+             f.write(bytes_data)
+
+         with TextIOWrapper(BytesIO(data.encode(utf8)), encoding=utf8) as bytes_buffer:
+             result = parser.read_csv(path, encoding=encoding, **kwargs)
+             expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs)
+         tm.assert_frame_equal(result, expected)
+
+
+ def test_utf16_example(all_parsers, csv_dir_path):
+     path = os.path.join(csv_dir_path, "utf16_ex.txt")
+     parser = all_parsers
+     result = parser.read_csv(path, encoding="utf-16", sep="\t")
+     assert len(result) == 50
+
+
+ def test_unicode_encoding(all_parsers, csv_dir_path):
+     path = os.path.join(csv_dir_path, "unicode_series.csv")
+     parser = all_parsers
+
+     result = parser.read_csv(path, header=None, encoding="latin-1")
+     result = result.set_index(0)
+     got = result[1][1632]
+
+     expected = "\xc1 k\xf6ldum klaka (Cold Fever) (1994)"
+     assert got == expected
+
+
+ @pytest.mark.parametrize(
+     "data,kwargs,expected",
+     [
+         # Basic test
+         ("a\n1", {}, DataFrame({"a": [1]})),
+         # "Regular" quoting
+         ('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})),
+         # Test in a data row instead of header
+         ("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})),
+         # Test in empty data row with skipping
+         ("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})),
+         # Test in empty data row without skipping
+         (
+             "\n1",
+             {"names": ["a"], "skip_blank_lines": False},
+             DataFrame({"a": [np.nan, 1]}),
+         ),
+     ],
+ )
+ def test_utf8_bom(all_parsers, data, kwargs, expected, request):
+     # see gh-4793
+     parser = all_parsers
+     bom = "\ufeff"
+     utf8 = "utf-8"
+
+     def _encode_data_with_bom(_data):
+         bom_data = (bom + _data).encode(utf8)
+         return BytesIO(bom_data)
+
+     if (
+         parser.engine == "pyarrow"
+         and data == "\n1"
+         and kwargs.get("skip_blank_lines", True)
+     ):
+         # CSV parse error: Empty CSV file or block: cannot infer number of columns
+         pytest.skip(reason="https://github.com/apache/arrow/issues/38676")
+
+     result = parser.read_csv(_encode_data_with_bom(data), encoding=utf8, **kwargs)
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt):
+     # see gh-13549
+     expected = DataFrame({"mb_num": [4.8], "multibyte": ["test"]})
+     parser = all_parsers
+
+     encoding = encoding_fmt.format(utf_value)
+     data = "mb_num,multibyte\n4.8,test".encode(encoding)
+
+     result = parser.read_csv(BytesIO(data), encoding=encoding)
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "file_path,encoding",
+     [
+         (("io", "data", "csv", "test1.csv"), "utf-8"),
+         (("io", "parser", "data", "unicode_series.csv"), "latin-1"),
+         (("io", "parser", "data", "sauron.SHIFT_JIS.csv"), "shiftjis"),
+     ],
+ )
+ def test_binary_mode_file_buffers(all_parsers, file_path, encoding, datapath):
+     # gh-23779: Python csv engine shouldn't error on files opened in binary.
+     # gh-31575: Python csv engine shouldn't error on files opened in raw binary.
+     parser = all_parsers
+
+     fpath = datapath(*file_path)
+     expected = parser.read_csv(fpath, encoding=encoding)
+
+     with open(fpath, encoding=encoding) as fa:
+         result = parser.read_csv(fa)
+         assert not fa.closed
+     tm.assert_frame_equal(expected, result)
+
+     with open(fpath, mode="rb") as fb:
+         result = parser.read_csv(fb, encoding=encoding)
+         assert not fb.closed
+     tm.assert_frame_equal(expected, result)
+
+     with open(fpath, mode="rb", buffering=0) as fb:
+         result = parser.read_csv(fb, encoding=encoding)
+         assert not fb.closed
+     tm.assert_frame_equal(expected, result)
+
+
+ @pytest.mark.parametrize("pass_encoding", [True, False])
+ def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding):
+     # see gh-24130
+     parser = all_parsers
+     encoding = encoding_fmt.format(utf_value)
+
+     if parser.engine == "pyarrow" and pass_encoding is True and utf_value in [16, 32]:
+         # FIXME: this is bad!
+         pytest.skip("These cases freeze")
+
+     expected = DataFrame({"foo": ["bar"]})
+
+     with tm.ensure_clean(mode="w+", encoding=encoding, return_filelike=True) as f:
+         f.write("foo\nbar")
+         f.seek(0)
+
+         result = parser.read_csv(f, encoding=encoding if pass_encoding else None)
+         tm.assert_frame_equal(result, expected)
+
+
+ def test_encoding_named_temp_file(all_parsers):
+     # see gh-31819
+     parser = all_parsers
+     encoding = "shift-jis"
+
+     title = "てすと"
+     data = "こむ"
+
+     expected = DataFrame({title: [data]})
+
+     with tempfile.NamedTemporaryFile() as f:
+         f.write(f"{title}\n{data}".encode(encoding))
+
+         f.seek(0)
+
+         result = parser.read_csv(f, encoding=encoding)
+         tm.assert_frame_equal(result, expected)
+         assert not f.closed
+
+
+ @pytest.mark.parametrize(
+     "encoding", ["utf-8", "utf-16", "utf-16-be", "utf-16-le", "utf-32"]
+ )
+ def test_parse_encoded_special_characters(encoding):
+     # GH16218 Verify parsing of data with encoded special characters
+     # Data contains a Unicode 'FULLWIDTH COLON' (U+FF1A) at position (0,"a")
+     data = "a\tb\n:foo\t0\nbar\t1\nbaz\t2"  # noqa: RUF001
+     encoded_data = BytesIO(data.encode(encoding))
+     result = read_csv(encoded_data, delimiter="\t", encoding=encoding)
+
+     expected = DataFrame(
+         data=[[":foo", 0], ["bar", 1], ["baz", 2]],  # noqa: RUF001
+         columns=["a", "b"],
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("encoding", ["utf-8", None, "utf-16", "cp1255", "latin-1"])
+ def test_encoding_memory_map(all_parsers, encoding):
+     # GH40986
+     parser = all_parsers
+     expected = DataFrame(
+         {
+             "name": ["Raphael", "Donatello", "Miguel Angel", "Leonardo"],
+             "mask": ["red", "purple", "orange", "blue"],
+             "weapon": ["sai", "bo staff", "nunchunk", "katana"],
+         }
+     )
+     with tm.ensure_clean() as file:
+         expected.to_csv(file, index=False, encoding=encoding)
+
+         if parser.engine == "pyarrow":
+             msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+             with pytest.raises(ValueError, match=msg):
+                 parser.read_csv(file, encoding=encoding, memory_map=True)
+             return
+
+         df = parser.read_csv(file, encoding=encoding, memory_map=True)
+     tm.assert_frame_equal(df, expected)
+
+
+ def test_chunk_splits_multibyte_char(all_parsers):
+     """
+     Chunk splits a multibyte character with memory_map=True
+
+     GH 43540
+     """
+     parser = all_parsers
+     # DEFAULT_CHUNKSIZE = 262144, defined in parsers.pyx
+     df = DataFrame(data=["a" * 127] * 2048)
+
+     # Put two-bytes utf-8 encoded character "ą" at the end of chunk
+     # utf-8 encoding of "ą" is b'\xc4\x85'
+     df.iloc[2047] = "a" * 127 + "ą"
+     with tm.ensure_clean("bug-gh43540.csv") as fname:
+         df.to_csv(fname, index=False, header=False, encoding="utf-8")
+
+         if parser.engine == "pyarrow":
+             msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+             with pytest.raises(ValueError, match=msg):
+                 parser.read_csv(fname, header=None, memory_map=True)
+             return
+
+         dfr = parser.read_csv(fname, header=None, memory_map=True)
+     tm.assert_frame_equal(dfr, df)
+
+
+ def test_readcsv_memmap_utf8(all_parsers):
+     """
+     GH 43787
+
+     Test correct handling of UTF-8 chars when memory_map=True and encoding is UTF-8
+     """
+     lines = []
+     line_length = 128
+     start_char = " "
+     end_char = "\U00010080"
+     # This for loop creates a list of 128-char strings
+     # consisting of consecutive Unicode chars
+     for lnum in range(ord(start_char), ord(end_char), line_length):
+         line = "".join([chr(c) for c in range(lnum, lnum + 0x80)]) + "\n"
+         try:
+             line.encode("utf-8")
+         except UnicodeEncodeError:
+             continue
+         lines.append(line)
+     parser = all_parsers
+     df = DataFrame(lines)
+     with tm.ensure_clean("utf8test.csv") as fname:
+         df.to_csv(fname, index=False, header=False, encoding="utf-8")
+
+         if parser.engine == "pyarrow":
+             msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"
+             with pytest.raises(ValueError, match=msg):
+                 parser.read_csv(fname, header=None, memory_map=True, encoding="utf-8")
+             return
+
+         dfr = parser.read_csv(fname, header=None, memory_map=True, encoding="utf-8")
+     tm.assert_frame_equal(df, dfr)
+
+
+ @pytest.mark.usefixtures("pyarrow_xfail")
+ @pytest.mark.parametrize("mode", ["w+b", "w+t"])
+ def test_not_readable(all_parsers, mode):
+     # GH43439
+     parser = all_parsers
+     content = b"abcd"
+     if "t" in mode:
+         content = "abcd"
+     with tempfile.SpooledTemporaryFile(mode=mode, encoding="utf-8") as handle:
+         handle.write(content)
+         handle.seek(0)
+         df = parser.read_csv(handle)
+     expected = DataFrame([], columns=["abcd"])
+     tm.assert_frame_equal(df, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_header.py ADDED
@@ -0,0 +1,733 @@
+ """
+ Tests that the file header is properly handled or inferred
+ during parsing for all of the parsers defined in parsers.py
+ """
+
+ from collections import namedtuple
+ from io import StringIO
+
+ import numpy as np
+ import pytest
+
+ from pandas.errors import ParserError
+
+ from pandas import (
+     DataFrame,
+     Index,
+     MultiIndex,
+ )
+ import pandas._testing as tm
+
+ pytestmark = pytest.mark.filterwarnings(
+     "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+ )
+
+ xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+ skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_read_with_bad_header(all_parsers):
+     parser = all_parsers
+     msg = r"but only \d+ lines in file"
+
+     with pytest.raises(ValueError, match=msg):
+         s = StringIO(",,")
+         parser.read_csv(s, header=[10])
+
+
+ def test_negative_header(all_parsers):
+     # see gh-27779
+     parser = all_parsers
+     data = """1,2,3,4,5
+ 6,7,8,9,10
+ 11,12,13,14,15
+ """
+     with pytest.raises(
+         ValueError,
+         match="Passing negative integer to header is invalid. "
+         "For no header, use header=None instead",
+     ):
+         parser.read_csv(StringIO(data), header=-1)
+
+
+ @pytest.mark.parametrize("header", [([-1, 2, 4]), ([-5, 0])])
+ def test_negative_multi_index_header(all_parsers, header):
+     # see gh-27779
+     parser = all_parsers
+     data = """1,2,3,4,5
+ 6,7,8,9,10
+ 11,12,13,14,15
+ """
+     with pytest.raises(
+         ValueError, match="cannot specify multi-index header with negative integers"
+     ):
+         parser.read_csv(StringIO(data), header=header)
+
+
+ @pytest.mark.parametrize("header", [True, False])
+ def test_bool_header_arg(all_parsers, header):
+     # see gh-6114
+     parser = all_parsers
+     data = """\
+ MyColumn
+ a
+ b
+ a
+ b"""
+     msg = "Passing a bool to header is invalid"
+     with pytest.raises(TypeError, match=msg):
+         parser.read_csv(StringIO(data), header=header)
+
+
+ @xfail_pyarrow  # AssertionError: DataFrame are different
+ def test_header_with_index_col(all_parsers):
+     parser = all_parsers
+     data = """foo,1,2,3
+ bar,4,5,6
+ baz,7,8,9
+ """
+     names = ["A", "B", "C"]
+     result = parser.read_csv(StringIO(data), names=names)
+
+     expected = DataFrame(
+         [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+         index=["foo", "bar", "baz"],
+         columns=["A", "B", "C"],
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_header_not_first_line(all_parsers):
+     parser = all_parsers
+     data = """got,to,ignore,this,line
+ got,to,ignore,this,line
+ index,A,B,C,D
+ foo,2,3,4,5
+ bar,7,8,9,10
+ baz,12,13,14,15
+ """
+     data2 = """index,A,B,C,D
+ foo,2,3,4,5
+ bar,7,8,9,10
+ baz,12,13,14,15
+ """
+
+     result = parser.read_csv(StringIO(data), header=2, index_col=0)
+     expected = parser.read_csv(StringIO(data2), header=0, index_col=0)
+     tm.assert_frame_equal(result, expected)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_header_multi_index(all_parsers):
+     parser = all_parsers
+
+     data = """\
+ C0,,C_l0_g0,C_l0_g1,C_l0_g2
+
+ C1,,C_l1_g0,C_l1_g1,C_l1_g2
+ C2,,C_l2_g0,C_l2_g1,C_l2_g2
+ C3,,C_l3_g0,C_l3_g1,C_l3_g2
+ R0,R1,,,
+ R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
+ R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
+ R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
+ R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
+ R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
+ """
+     result = parser.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[0, 1])
+     data_gen_f = lambda r, c: f"R{r}C{c}"
+
+     data = [[data_gen_f(r, c) for c in range(3)] for r in range(5)]
+     index = MultiIndex.from_arrays(
+         [[f"R_l0_g{i}" for i in range(5)], [f"R_l1_g{i}" for i in range(5)]],
+         names=["R0", "R1"],
+     )
+     columns = MultiIndex.from_arrays(
+         [
+             [f"C_l0_g{i}" for i in range(3)],
+             [f"C_l1_g{i}" for i in range(3)],
+             [f"C_l2_g{i}" for i in range(3)],
+             [f"C_l3_g{i}" for i in range(3)],
+         ],
+         names=["C0", "C1", "C2", "C3"],
+     )
+     expected = DataFrame(data, columns=columns, index=index)
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "kwargs,msg",
+     [
+         (
+             {"index_col": ["foo", "bar"]},
+             (
+                 "index_col must only contain "
+                 "row numbers when specifying "
+                 "a multi-index header"
+             ),
+         ),
+         (
+             {"index_col": [0, 1], "names": ["foo", "bar"]},
+             ("cannot specify names when specifying a multi-index header"),
+         ),
+         (
+             {"index_col": [0, 1], "usecols": ["foo", "bar"]},
+             ("cannot specify usecols when specifying a multi-index header"),
+         ),
+     ],
+ )
+ def test_header_multi_index_invalid(all_parsers, kwargs, msg):
+     data = """\
+ C0,,C_l0_g0,C_l0_g1,C_l0_g2
+
+ C1,,C_l1_g0,C_l1_g1,C_l1_g2
+ C2,,C_l2_g0,C_l2_g1,C_l2_g2
+ C3,,C_l3_g0,C_l3_g1,C_l3_g2
+ R0,R1,,,
+ R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
+ R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
+ R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
+ R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
+ R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
+ """
+     parser = all_parsers
+
+     with pytest.raises(ValueError, match=msg):
+         parser.read_csv(StringIO(data), header=[0, 1, 2, 3], **kwargs)
+
+
+ _TestTuple = namedtuple("_TestTuple", ["first", "second"])
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ @pytest.mark.parametrize(
+     "kwargs",
+     [
+         {"header": [0, 1]},
+         {
+             "skiprows": 3,
+             "names": [
+                 ("a", "q"),
+                 ("a", "r"),
+                 ("a", "s"),
+                 ("b", "t"),
+                 ("c", "u"),
+                 ("c", "v"),
+             ],
+         },
+         {
+             "skiprows": 3,
+             "names": [
+                 _TestTuple("a", "q"),
+                 _TestTuple("a", "r"),
+                 _TestTuple("a", "s"),
+                 _TestTuple("b", "t"),
+                 _TestTuple("c", "u"),
+                 _TestTuple("c", "v"),
+             ],
+         },
+     ],
+ )
+ def test_header_multi_index_common_format1(all_parsers, kwargs):
+     parser = all_parsers
+     expected = DataFrame(
+         [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
+         index=["one", "two"],
+         columns=MultiIndex.from_tuples(
+             [("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]
+         ),
+     )
+     data = """,a,a,a,b,c,c
+ ,q,r,s,t,u,v
+ ,,,,,,
+ one,1,2,3,4,5,6
+ two,7,8,9,10,11,12"""
+
+     result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
+     tm.assert_frame_equal(result, expected)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ @pytest.mark.parametrize(
+     "kwargs",
+     [
+         {"header": [0, 1]},
+         {
+             "skiprows": 2,
+             "names": [
+                 ("a", "q"),
+                 ("a", "r"),
+                 ("a", "s"),
+                 ("b", "t"),
+                 ("c", "u"),
+                 ("c", "v"),
+             ],
+         },
+         {
+             "skiprows": 2,
+             "names": [
+                 _TestTuple("a", "q"),
+                 _TestTuple("a", "r"),
+                 _TestTuple("a", "s"),
+                 _TestTuple("b", "t"),
+                 _TestTuple("c", "u"),
+                 _TestTuple("c", "v"),
+             ],
+         },
+     ],
+ )
+ def test_header_multi_index_common_format2(all_parsers, kwargs):
+     parser = all_parsers
+     expected = DataFrame(
+         [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
+         index=["one", "two"],
+         columns=MultiIndex.from_tuples(
+             [("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]
+         ),
+     )
+     data = """,a,a,a,b,c,c
+ ,q,r,s,t,u,v
+ one,1,2,3,4,5,6
+ two,7,8,9,10,11,12"""
+
+     result = parser.read_csv(StringIO(data), index_col=0, **kwargs)
+     tm.assert_frame_equal(result, expected)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ @pytest.mark.parametrize(
+     "kwargs",
+     [
+         {"header": [0, 1]},
+         {
+             "skiprows": 2,
+             "names": [
+                 ("a", "q"),
+                 ("a", "r"),
+                 ("a", "s"),
+                 ("b", "t"),
+                 ("c", "u"),
+                 ("c", "v"),
+             ],
+         },
+         {
+             "skiprows": 2,
+             "names": [
+                 _TestTuple("a", "q"),
+                 _TestTuple("a", "r"),
+                 _TestTuple("a", "s"),
+                 _TestTuple("b", "t"),
+                 _TestTuple("c", "u"),
+                 _TestTuple("c", "v"),
+             ],
+         },
+     ],
+ )
+ def test_header_multi_index_common_format3(all_parsers, kwargs):
+     parser = all_parsers
+     expected = DataFrame(
+         [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
+         index=["one", "two"],
+         columns=MultiIndex.from_tuples(
+             [("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]
+         ),
+     )
+     expected = expected.reset_index(drop=True)
+     data = """a,a,a,b,c,c
+ q,r,s,t,u,v
+ 1,2,3,4,5,6
+ 7,8,9,10,11,12"""
+
+     result = parser.read_csv(StringIO(data), index_col=None, **kwargs)
+     tm.assert_frame_equal(result, expected)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_header_multi_index_common_format_malformed1(all_parsers):
+     parser = all_parsers
+     expected = DataFrame(
+         np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),
+         index=Index([1, 7]),
+         columns=MultiIndex(
+             levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]],
+             codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
+             names=["a", "q"],
+         ),
+     )
+     data = """a,a,a,b,c,c
+ q,r,s,t,u,v
+ 1,2,3,4,5,6
+ 7,8,9,10,11,12"""
+
+     result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)
+     tm.assert_frame_equal(expected, result)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_header_multi_index_common_format_malformed2(all_parsers):
+     parser = all_parsers
+     expected = DataFrame(
+         np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),
+         index=Index([1, 7]),
+         columns=MultiIndex(
+             levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]],
+             codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],
+             names=[None, "q"],
+         ),
+     )
+
+     data = """,a,a,b,c,c
+ q,r,s,t,u,v
+ 1,2,3,4,5,6
+ 7,8,9,10,11,12"""
+
+     result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)
+     tm.assert_frame_equal(expected, result)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_header_multi_index_common_format_malformed3(all_parsers):
+     parser = all_parsers
+     expected = DataFrame(
+         np.array([[3, 4, 5, 6], [9, 10, 11, 12]], dtype="int64"),
+         index=MultiIndex(levels=[[1, 7], [2, 8]], codes=[[0, 1], [0, 1]]),
+         columns=MultiIndex(
+             levels=[["a", "b", "c"], ["s", "t", "u", "v"]],
+             codes=[[0, 1, 2, 2], [0, 1, 2, 3]],
+             names=[None, "q"],
+         ),
+     )
+     data = """,a,a,b,c,c
+ q,r,s,t,u,v
+ 1,2,3,4,5,6
+ 7,8,9,10,11,12"""
+
+     result = parser.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
+     tm.assert_frame_equal(expected, result)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_header_multi_index_blank_line(all_parsers):
+     # GH 40442
+     parser = all_parsers
+     data = [[None, None], [1, 2], [3, 4]]
+     columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")])
+     expected = DataFrame(data, columns=columns)
+     data = "a,b\nA,B\n,\n1,2\n3,4"
+     result = parser.read_csv(StringIO(data), header=[0, 1])
+     tm.assert_frame_equal(expected, result)
+
+
+ @pytest.mark.parametrize(
+     "data,header", [("1,2,3\n4,5,6", None), ("foo,bar,baz\n1,2,3\n4,5,6", 0)]
+ )
+ def test_header_names_backward_compat(all_parsers, data, header, request):
+     # see gh-2539
+     parser = all_parsers
+
+     if parser.engine == "pyarrow" and header is not None:
+         mark = pytest.mark.xfail(reason="DataFrame.columns are different")
+         request.applymarker(mark)
+
+     expected = parser.read_csv(StringIO("1,2,3\n4,5,6"), names=["a", "b", "c"])
+
+     result = parser.read_csv(StringIO(data), names=["a", "b", "c"], header=header)
+     tm.assert_frame_equal(result, expected)
+
+
+ @skip_pyarrow  # CSV parse error: Empty CSV file or block: cannot infer
+ @pytest.mark.parametrize("kwargs", [{}, {"index_col": False}])
+ def test_read_only_header_no_rows(all_parsers, kwargs):
+     # See gh-7773
+     parser = all_parsers
+     expected = DataFrame(columns=["a", "b", "c"])
+
+     result = parser.read_csv(StringIO("a,b,c"), **kwargs)
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize(
+     "kwargs,names",
+     [
+         ({}, [0, 1, 2, 3, 4]),
+         (
+             {"names": ["foo", "bar", "baz", "quux", "panda"]},
+             ["foo", "bar", "baz", "quux", "panda"],
+         ),
+     ],
+ )
+ def test_no_header(all_parsers, kwargs, names):
+     parser = all_parsers
+     data = """1,2,3,4,5
+ 6,7,8,9,10
+ 11,12,13,14,15
+ """
+     expected = DataFrame(
+         [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], columns=names
+     )
+     result = parser.read_csv(StringIO(data), header=None, **kwargs)
+     tm.assert_frame_equal(result, expected)
+
+
+ @pytest.mark.parametrize("header", [["a", "b"], "string_header"])
+ def test_non_int_header(all_parsers, header):
+     # see gh-16338
+     msg = "header must be integer or list of integers"
+     data = """1,2\n3,4"""
+     parser = all_parsers
+
+     with pytest.raises(ValueError, match=msg):
+         parser.read_csv(StringIO(data), header=header)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_singleton_header(all_parsers):
+     # see gh-7757
+     data = """a,b,c\n0,1,2\n1,2,3"""
+     parser = all_parsers
+
+     expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]})
+     result = parser.read_csv(StringIO(data), header=[0])
+     tm.assert_frame_equal(result, expected)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ @pytest.mark.parametrize(
+     "data,expected",
+     [
+         (
+             "A,A,A,B\none,one,one,two\n0,40,34,0.1",
+             DataFrame(
+                 [[0, 40, 34, 0.1]],
+                 columns=MultiIndex.from_tuples(
+                     [("A", "one"), ("A", "one.1"), ("A", "one.2"), ("B", "two")]
+                 ),
+             ),
+         ),
+         (
+             "A,A,A,B\none,one,one.1,two\n0,40,34,0.1",
+             DataFrame(
+                 [[0, 40, 34, 0.1]],
+                 columns=MultiIndex.from_tuples(
+                     [("A", "one"), ("A", "one.1"), ("A", "one.1.1"), ("B", "two")]
+                 ),
+             ),
+         ),
+         (
+             "A,A,A,B,B\none,one,one.1,two,two\n0,40,34,0.1,0.1",
+             DataFrame(
+                 [[0, 40, 34, 0.1, 0.1]],
+                 columns=MultiIndex.from_tuples(
+                     [
+                         ("A", "one"),
+                         ("A", "one.1"),
+                         ("A", "one.1.1"),
+                         ("B", "two"),
+                         ("B", "two.1"),
+                     ]
+                 ),
+             ),
+         ),
+     ],
+ )
+ def test_mangles_multi_index(all_parsers, data, expected):
+     # see gh-18062
+     parser = all_parsers
+
+     result = parser.read_csv(StringIO(data), header=[0, 1])
+     tm.assert_frame_equal(result, expected)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ @pytest.mark.parametrize("index_col", [None, [0]])
+ @pytest.mark.parametrize(
+     "columns", [None, (["", "Unnamed"]), (["Unnamed", ""]), (["Unnamed", "NotUnnamed"])]
+ )
+ def test_multi_index_unnamed(all_parsers, index_col, columns):
+     # see gh-23687
+     #
+     # When specifying a multi-index header, make sure that
+     # we don't error just because one of the rows in our header
+     # has ALL column names containing the string "Unnamed". The
+     # correct condition to check is whether the row contains
+     # ALL columns that did not have names (and instead were given
+     # placeholder ones).
+     parser = all_parsers
+     header = [0, 1]
+
+     if index_col is None:
+         data = ",".join(columns or ["", ""]) + "\n0,1\n2,3\n4,5\n"
+     else:
+         data = ",".join([""] + (columns or ["", ""])) + "\n,0,1\n0,2,3\n1,4,5\n"
+
+     result = parser.read_csv(StringIO(data), header=header, index_col=index_col)
+     exp_columns = []
+
+     if columns is None:
+         columns = ["", "", ""]
+
+     for i, col in enumerate(columns):
+         if not col:  # Unnamed.
+             col = f"Unnamed: {i if index_col is None else i + 1}_level_0"
+
+         exp_columns.append(col)
+
+     columns = MultiIndex.from_tuples(zip(exp_columns, ["0", "1"]))
+     expected = DataFrame([[2, 3], [4, 5]], columns=columns)
+     tm.assert_frame_equal(result, expected)
+
+
+ @skip_pyarrow  # CSV parse error: Expected 2 columns, got 3
+ def test_names_longer_than_header_but_equal_with_data_rows(all_parsers):
+     # GH#38453
+     parser = all_parsers
+     data = """a, b
+ 1,2,3
+ 5,6,4
+ """
+     result = parser.read_csv(StringIO(data), header=0, names=["A", "B", "C"])
+     expected = DataFrame({"A": [1, 5], "B": [2, 6], "C": [3, 4]})
+     tm.assert_frame_equal(result, expected)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_read_csv_multiindex_columns(all_parsers):
+     # GH#6051
+     parser = all_parsers
+
+     s1 = "Male, Male, Male, Female, Female\nR, R, L, R, R\n.86, .67, .88, .78, .81"
+     s2 = (
+         "Male, Male, Male, Female, Female\n"
+         "R, R, L, R, R\n"
+         ".86, .67, .88, .78, .81\n"
+         ".86, .67, .88, .78, .82"
+     )
+
+     mi = MultiIndex.from_tuples(
+         [
+             ("Male", "R"),
+             (" Male", " R"),
+             (" Male", " L"),
+             (" Female", " R"),
+             (" Female", " R.1"),
+         ]
+     )
+     expected = DataFrame(
+         [[0.86, 0.67, 0.88, 0.78, 0.81], [0.86, 0.67, 0.88, 0.78, 0.82]], columns=mi
+     )
+
+     df1 = parser.read_csv(StringIO(s1), header=[0, 1])
+     tm.assert_frame_equal(df1, expected.iloc[:1])
+     df2 = parser.read_csv(StringIO(s2), header=[0, 1])
+     tm.assert_frame_equal(df2, expected)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_read_csv_multi_header_length_check(all_parsers):
+     # GH#43102
+     parser = all_parsers
+
+     case = """row11,row12,row13
+ row21,row22, row23
+ row31,row32
+ """
+
+     with pytest.raises(
+         ParserError, match="Header rows must have an equal number of columns."
+     ):
+         parser.read_csv(StringIO(case), header=[0, 2])
+
+
+ @skip_pyarrow  # CSV parse error: Expected 3 columns, got 2
+ def test_header_none_and_implicit_index(all_parsers):
+     # GH#22144
+     parser = all_parsers
+     data = "x,1,5\ny,2\nz,3\n"
+     result = parser.read_csv(StringIO(data), names=["a", "b"], header=None)
+     expected = DataFrame(
+         {"a": [1, 2, 3], "b": [5, np.nan, np.nan]}, index=["x", "y", "z"]
+     )
+     tm.assert_frame_equal(result, expected)
+
+
+ @skip_pyarrow  # regex mismatch "CSV parse error: Expected 2 columns, got "
+ def test_header_none_and_implicit_index_in_second_row(all_parsers):
+     # GH#22144
+     parser = all_parsers
+     data = "x,1\ny,2,5\nz,3\n"
+     with pytest.raises(ParserError, match="Expected 2 fields in line 2, saw 3"):
+         parser.read_csv(StringIO(data), names=["a", "b"], header=None)
+
+
+ def test_header_none_and_on_bad_lines_skip(all_parsers):
+     # GH#22144
+     parser = all_parsers
+     data = "x,1\ny,2,5\nz,3\n"
+     result = parser.read_csv(
+         StringIO(data), names=["a", "b"], header=None, on_bad_lines="skip"
+     )
+     expected = DataFrame({"a": ["x", "z"], "b": [1, 3]})
+     tm.assert_frame_equal(result, expected)
+
+
+ @xfail_pyarrow  # TypeError: an integer is required
+ def test_header_missing_rows(all_parsers):
+     # GH#47400
+     parser = all_parsers
+     data = """a,b
+ 1,2
+ """
+     msg = r"Passed header=\[0,1,2\], len of 3, but only 2 lines in file"
+     with pytest.raises(ValueError, match=msg):
+         parser.read_csv(StringIO(data), header=[0, 1, 2])
+
+
+ # ValueError: The 'delim_whitespace' option is not supported with the 'pyarrow' engine
+ @xfail_pyarrow
+ def test_header_multiple_whitespaces(all_parsers):
+     # GH#54931
+     parser = all_parsers
+     data = """aa bb(1,1) cc(1,1)
+ 0 2 3.5"""
+
+     result = parser.read_csv(StringIO(data), sep=r"\s+")
+     expected = DataFrame({"aa": [0], "bb(1,1)": 2, "cc(1,1)": 3.5})
+     tm.assert_frame_equal(result, expected)
+
+
+ # ValueError: The 'delim_whitespace' option is not supported with the 'pyarrow' engine
+ @xfail_pyarrow
+ def test_header_delim_whitespace(all_parsers):
+     # GH#54918
+     parser = all_parsers
+     data = """a,b
+ 1,2
+ 3,4
+ """
+
+     depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"
+     with tm.assert_produces_warning(
+         FutureWarning, match=depr_msg, check_stacklevel=False
+     ):
+         result = parser.read_csv(StringIO(data), delim_whitespace=True)
+     expected = DataFrame({"a,b": ["1,2", "3,4"]})
+     tm.assert_frame_equal(result, expected)
+
+
+ def test_usecols_no_header_pyarrow(pyarrow_parser_only):
+     parser = pyarrow_parser_only
+     data = """
+ a,i,x
+ b,j,y
+ """
+     result = parser.read_csv(
+         StringIO(data),
+         header=None,
+         usecols=[0, 1],
+         dtype="string[pyarrow]",
+         dtype_backend="pyarrow",
+         engine="pyarrow",
+     )
+     expected = DataFrame([["a", "i"], ["b", "j"]], dtype="string[pyarrow]")
+     tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_index_col.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+"""
+Tests that the specified index column (a.k.a "index_col")
+is properly handled or inferred during parsing for all of
+the parsers defined in parsers.py
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    Index,
+    MultiIndex,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+@pytest.mark.parametrize("with_header", [True, False])
+def test_index_col_named(all_parsers, with_header):
+    parser = all_parsers
+    no_header = """\
+KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
+KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
+KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
+KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
+KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
+KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
+    header = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
+
+    if with_header:
+        data = header + no_header
+
+        result = parser.read_csv(StringIO(data), index_col="ID")
+        expected = parser.read_csv(StringIO(data), header=0).set_index("ID")
+        tm.assert_frame_equal(result, expected)
+    else:
+        data = no_header
+        msg = "Index ID invalid"
+
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), index_col="ID")
+
+
+def test_index_col_named2(all_parsers):
+    parser = all_parsers
+    data = """\
+1,2,3,4,hello
+5,6,7,8,world
+9,10,11,12,foo
+"""
+
+    expected = DataFrame(
+        {"a": [1, 5, 9], "b": [2, 6, 10], "c": [3, 7, 11], "d": [4, 8, 12]},
+        index=Index(["hello", "world", "foo"], name="message"),
+    )
+    names = ["a", "b", "c", "d", "message"]
+
+    result = parser.read_csv(StringIO(data), names=names, index_col=["message"])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_index_col_is_true(all_parsers):
+    # see gh-9798
+    data = "a,b\n1,2"
+    parser = all_parsers
+
+    msg = "The value of index_col couldn't be 'True'"
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(StringIO(data), index_col=True)
+
+
+@skip_pyarrow  # CSV parse error: Expected 3 columns, got 4
+def test_infer_index_col(all_parsers):
+    data = """A,B,C
+foo,1,2,3
+bar,4,5,6
+baz,7,8,9
+"""
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data))
+
+    expected = DataFrame(
+        [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
+        index=["foo", "bar", "baz"],
+        columns=["A", "B", "C"],
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow  # CSV parse error: Empty CSV file or block
+@pytest.mark.parametrize(
+    "index_col,kwargs",
+    [
+        (None, {"columns": ["x", "y", "z"]}),
+        (False, {"columns": ["x", "y", "z"]}),
+        (0, {"columns": ["y", "z"], "index": Index([], name="x")}),
+        (1, {"columns": ["x", "z"], "index": Index([], name="y")}),
+        ("x", {"columns": ["y", "z"], "index": Index([], name="x")}),
+        ("y", {"columns": ["x", "z"], "index": Index([], name="y")}),
+        (
+            [0, 1],
+            {
+                "columns": ["z"],
+                "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),
+            },
+        ),
+        (
+            ["x", "y"],
+            {
+                "columns": ["z"],
+                "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),
+            },
+        ),
+        (
+            [1, 0],
+            {
+                "columns": ["z"],
+                "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),
+            },
+        ),
+        (
+            ["y", "x"],
+            {
+                "columns": ["z"],
+                "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),
+            },
+        ),
+    ],
+)
+def test_index_col_empty_data(all_parsers, index_col, kwargs):
+    data = "x,y,z"
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data), index_col=index_col)
+
+    expected = DataFrame(**kwargs)
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow  # CSV parse error: Empty CSV file or block
+def test_empty_with_index_col_false(all_parsers):
+    # see gh-10413
+    data = "x,y"
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data), index_col=False)
+
+    expected = DataFrame(columns=["x", "y"])
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "index_names",
+    [
+        ["", ""],
+        ["foo", ""],
+        ["", "bar"],
+        ["foo", "bar"],
+        ["NotReallyUnnamed", "Unnamed: 0"],
+    ],
+)
+def test_multi_index_naming(all_parsers, index_names, request):
+    parser = all_parsers
+
+    if parser.engine == "pyarrow" and "" in index_names:
+        mark = pytest.mark.xfail(reason="One case raises, others are wrong")
+        request.applymarker(mark)
+
+    # We don't want empty index names being replaced with "Unnamed: 0"
+    data = ",".join(index_names + ["col\na,c,1\na,d,2\nb,c,3\nb,d,4"])
+    result = parser.read_csv(StringIO(data), index_col=[0, 1])
+
+    expected = DataFrame(
+        {"col": [1, 2, 3, 4]}, index=MultiIndex.from_product([["a", "b"], ["c", "d"]])
+    )
+    expected.index.names = [name if name else None for name in index_names]
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+def test_multi_index_naming_not_all_at_beginning(all_parsers):
+    parser = all_parsers
+    data = ",Unnamed: 2,\na,c,1\na,d,2\nb,c,3\nb,d,4"
+    result = parser.read_csv(StringIO(data), index_col=[0, 2])
+
+    expected = DataFrame(
+        {"Unnamed: 2": ["c", "d", "c", "d"]},
+        index=MultiIndex(
+            levels=[["a", "b"], [1, 2, 3, 4]], codes=[[0, 0, 1, 1], [0, 1, 2, 3]]
+        ),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+def test_no_multi_index_level_names_empty(all_parsers):
+    # GH 10984
+    parser = all_parsers
+    midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
+    expected = DataFrame(
+        np.random.default_rng(2).standard_normal((3, 3)),
+        index=midx,
+        columns=["x", "y", "z"],
+    )
+    with tm.ensure_clean() as path:
+        expected.to_csv(path)
+        result = parser.read_csv(path, index_col=[0, 1, 2])
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # TypeError: an integer is required
+def test_header_with_index_col(all_parsers):
+    # GH 33476
+    parser = all_parsers
+    data = """
+I11,A,A
+I12,B,B
+I2,1,3
+"""
+    midx = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"])
+    idx = Index(["I2"])
+    expected = DataFrame([[1, 3]], index=idx, columns=midx)
+
+    result = parser.read_csv(StringIO(data), index_col=0, header=[0, 1])
+    tm.assert_frame_equal(result, expected)
+
+    col_idx = Index(["A", "A.1"])
+    idx = Index(["I12", "I2"], name="I11")
+    expected = DataFrame([["B", "B"], ["1", "3"]], index=idx, columns=col_idx)
+
+    result = parser.read_csv(StringIO(data), index_col="I11", header=0)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.slow
+def test_index_col_large_csv(all_parsers, monkeypatch):
+    # https://github.com/pandas-dev/pandas/issues/37094
+    parser = all_parsers
+
+    ARR_LEN = 100
+    df = DataFrame(
+        {
+            "a": range(ARR_LEN + 1),
+            "b": np.random.default_rng(2).standard_normal(ARR_LEN + 1),
+        }
+    )
+
+    with tm.ensure_clean() as path:
+        df.to_csv(path, index=False)
+        with monkeypatch.context() as m:
+            m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)
+            result = parser.read_csv(path, index_col=[0])
+
+    tm.assert_frame_equal(result, df.set_index("a"))
+
+
+@xfail_pyarrow  # TypeError: an integer is required
+def test_index_col_multiindex_columns_no_data(all_parsers):
+    # GH#38292
+    parser = all_parsers
+    result = parser.read_csv(
+        StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1], index_col=0
+    )
+    expected = DataFrame(
+        [],
+        index=Index([]),
+        columns=MultiIndex.from_arrays(
+            [["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]
+        ),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # TypeError: an integer is required
+def test_index_col_header_no_data(all_parsers):
+    # GH#38292
+    parser = all_parsers
+    result = parser.read_csv(StringIO("a0,a1,a2\n"), header=[0], index_col=0)
+    expected = DataFrame(
+        [],
+        columns=["a1", "a2"],
+        index=Index([], name="a0"),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # TypeError: an integer is required
+def test_multiindex_columns_no_data(all_parsers):
+    # GH#38292
+    parser = all_parsers
+    result = parser.read_csv(StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1])
+    expected = DataFrame(
+        [], columns=MultiIndex.from_arrays([["a0", "a1", "a2"], ["b0", "b1", "b2"]])
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # TypeError: an integer is required
+def test_multiindex_columns_index_col_with_data(all_parsers):
+    # GH#38292
+    parser = all_parsers
+    result = parser.read_csv(
+        StringIO("a0,a1,a2\nb0,b1,b2\ndata,data,data"), header=[0, 1], index_col=0
+    )
+    expected = DataFrame(
+        [["data", "data"]],
+        columns=MultiIndex.from_arrays(
+            [["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]
+        ),
+        index=Index(["data"]),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow  # CSV parse error: Empty CSV file or block
+def test_infer_types_boolean_sum(all_parsers):
+    # GH#44079
+    parser = all_parsers
+    result = parser.read_csv(
+        StringIO("0,1"),
+        names=["a", "b"],
+        index_col=["a"],
+        dtype={"a": "UInt8"},
+    )
+    expected = DataFrame(
+        data={
+            "a": [
+                0,
+            ],
+            "b": [1],
+        }
+    ).set_index("a")
+    # Not checking index type now, because the C parser will return an
+    # index column of dtype 'object', and the Python parser will return an
+    # index column of dtype 'int64'.
+    tm.assert_frame_equal(result, expected, check_index_type=False)
+
+
+@pytest.mark.parametrize("dtype, val", [(object, "01"), ("int64", 1)])
+def test_specify_dtype_for_index_col(all_parsers, dtype, val, request):
+    # GH#9435
+    data = "a,b\n01,2"
+    parser = all_parsers
+    if dtype == object and parser.engine == "pyarrow":
+        request.applymarker(
+            pytest.mark.xfail(reason="Cannot disable type-inference for pyarrow engine")
+        )
+    result = parser.read_csv(StringIO(data), index_col="a", dtype={"a": dtype})
+    expected = DataFrame({"b": [2]}, index=Index([val], name="a"))
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # TypeError: an integer is required
+def test_multiindex_columns_not_leading_index_col(all_parsers):
+    # GH#38549
+    parser = all_parsers
+    data = """a,b,c,d
+e,f,g,h
+x,y,1,2
+"""
+    result = parser.read_csv(
+        StringIO(data),
+        header=[0, 1],
+        index_col=1,
+    )
+    cols = MultiIndex.from_tuples(
+        [("a", "e"), ("c", "g"), ("d", "h")], names=["b", "f"]
+    )
+    expected = DataFrame([["x", 1, 2]], columns=cols, index=["y"])
+    tm.assert_frame_equal(result, expected)
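Editor's note: a short sketch of the index_col behaviors exercised above; nothing here is part of the test suite, just plain read_csv calls (the inference case works with the C/python engines, which is why the corresponding test skips pyarrow):

from io import StringIO

import pandas as pd

# index_col accepts a position, a name, or a list (which builds a MultiIndex).
data = "a,b,c\n1,x,2\n3,y,4"
by_name = pd.read_csv(StringIO(data), index_col="a")
multi = pd.read_csv(StringIO(data), index_col=["a", "b"])

# Inference: when data rows carry one more field than the header,
# the leading column becomes the index.
inferred = pd.read_csv(StringIO("A,B,C\nfoo,1,2,3\nbar,4,5,6"))
print(inferred.index.tolist())  # ['foo', 'bar']

# index_col=False disables any index inference.
no_index = pd.read_csv(StringIO("x,y\n1,2"), index_col=False)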
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_mangle_dupes.py ADDED
@@ -0,0 +1,179 @@
+"""
+Tests that duplicate columns are handled appropriately when parsed by the
+CSV engine. In general, the expected result is that they are either thoroughly
+de-duplicated (if mangling requested) or ignored otherwise.
+"""
+from io import StringIO
+
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+def test_basic(all_parsers):
+    parser = all_parsers
+
+    data = "a,a,b,b,b\n1,2,3,4,5"
+    result = parser.read_csv(StringIO(data), sep=",")
+
+    expected = DataFrame([[1, 2, 3, 4, 5]], columns=["a", "a.1", "b", "b.1", "b.2"])
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+def test_basic_names(all_parsers):
+    # See gh-7160
+    parser = all_parsers
+
+    data = "a,b,a\n0,1,2\n3,4,5"
+    expected = DataFrame([[0, 1, 2], [3, 4, 5]], columns=["a", "b", "a.1"])
+
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_basic_names_raise(all_parsers):
+    # See gh-7160
+    parser = all_parsers
+
+    data = "0,1,2\n3,4,5"
+    with pytest.raises(ValueError, match="Duplicate names"):
+        parser.read_csv(StringIO(data), names=["a", "b", "a"])
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+@pytest.mark.parametrize(
+    "data,expected",
+    [
+        ("a,a,a.1\n1,2,3", DataFrame([[1, 2, 3]], columns=["a", "a.2", "a.1"])),
+        (
+            "a,a,a.1,a.1.1,a.1.1.1,a.1.1.1.1\n1,2,3,4,5,6",
+            DataFrame(
+                [[1, 2, 3, 4, 5, 6]],
+                columns=["a", "a.2", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"],
+            ),
+        ),
+        (
+            "a,a,a.3,a.1,a.2,a,a\n1,2,3,4,5,6,7",
+            DataFrame(
+                [[1, 2, 3, 4, 5, 6, 7]],
+                columns=["a", "a.4", "a.3", "a.1", "a.2", "a.5", "a.6"],
+            ),
+        ),
+    ],
+)
+def test_thorough_mangle_columns(all_parsers, data, expected):
+    # see gh-17060
+    parser = all_parsers
+
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "data,names,expected",
+    [
+        (
+            "a,b,b\n1,2,3",
+            ["a.1", "a.1", "a.1.1"],
+            DataFrame(
+                [["a", "b", "b"], ["1", "2", "3"]], columns=["a.1", "a.1.1", "a.1.1.1"]
+            ),
+        ),
+        (
+            "a,b,c,d,e,f\n1,2,3,4,5,6",
+            ["a", "a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"],
+            DataFrame(
+                [["a", "b", "c", "d", "e", "f"], ["1", "2", "3", "4", "5", "6"]],
+                columns=["a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1", "a.1.1.1.1.1"],
+            ),
+        ),
+        (
+            "a,b,c,d,e,f,g\n1,2,3,4,5,6,7",
+            ["a", "a", "a.3", "a.1", "a.2", "a", "a"],
+            DataFrame(
+                [
+                    ["a", "b", "c", "d", "e", "f", "g"],
+                    ["1", "2", "3", "4", "5", "6", "7"],
+                ],
+                columns=["a", "a.1", "a.3", "a.1.1", "a.2", "a.2.1", "a.3.1"],
+            ),
+        ),
+    ],
+)
+def test_thorough_mangle_names(all_parsers, data, names, expected):
+    # see gh-17095
+    parser = all_parsers
+
+    with pytest.raises(ValueError, match="Duplicate names"):
+        parser.read_csv(StringIO(data), names=names)
+
+
+@xfail_pyarrow  # AssertionError: DataFrame.columns are different
+def test_mangled_unnamed_placeholders(all_parsers):
+    # xref gh-13017
+    orig_key = "0"
+    parser = all_parsers
+
+    orig_value = [1, 2, 3]
+    df = DataFrame({orig_key: orig_value})
+
+    # This test recursively updates `df`.
+    for i in range(3):
+        expected = DataFrame()
+
+        for j in range(i + 1):
+            col_name = "Unnamed: 0" + f".{1*j}" * min(j, 1)
+            expected.insert(loc=0, column=col_name, value=[0, 1, 2])
+
+        expected[orig_key] = orig_value
+        df = parser.read_csv(StringIO(df.to_csv()))
+
+        tm.assert_frame_equal(df, expected)
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+def test_mangle_dupe_cols_already_exists(all_parsers):
+    # GH#14704
+    parser = all_parsers
+
+    data = "a,a,a.1,a,a.3,a.1,a.1.1\n1,2,3,4,5,6,7"
+    result = parser.read_csv(StringIO(data))
+    expected = DataFrame(
+        [[1, 2, 3, 4, 5, 6, 7]],
+        columns=["a", "a.2", "a.1", "a.4", "a.3", "a.1.2", "a.1.1"],
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+def test_mangle_dupe_cols_already_exists_unnamed_col(all_parsers):
+    # GH#14704
+    parser = all_parsers
+
+    data = ",Unnamed: 0,,Unnamed: 2\n1,2,3,4"
+    result = parser.read_csv(StringIO(data))
+    expected = DataFrame(
+        [[1, 2, 3, 4]],
+        columns=["Unnamed: 0.1", "Unnamed: 0", "Unnamed: 2.1", "Unnamed: 2"],
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("usecol, engine", [([0, 1, 1], "python"), ([0, 1, 1], "c")])
+def test_mangle_cols_names(all_parsers, usecol, engine):
+    # GH 11823
+    parser = all_parsers
+    data = "1,2,3"
+    names = ["A", "A", "B"]
+    with pytest.raises(ValueError, match="Duplicate names"):
+        parser.read_csv(StringIO(data), names=names, usecols=usecol, engine=engine)
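Editor's note: the de-duplication rule the tests above encode, shown outside the fixtures (a sketch; the ".N" suffix numbering is an implementation detail of the C/python engines):

from io import StringIO

import pandas as pd

# Duplicate header names get ".N" suffixes, skipping names already taken:
# here "a.1" exists in the header, so the second "a" becomes "a.2".
df = pd.read_csv(StringIO("a,a,a.1\n1,2,3"))
print(list(df.columns))  # ['a', 'a.2', 'a.1']

# Explicitly passing duplicate names is rejected outright.
try:
    pd.read_csv(StringIO("1,2,3"), names=["A", "A", "B"])
except ValueError as err:
    print(err)  # mentions "Duplicate names"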
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_multi_thread.py ADDED
@@ -0,0 +1,150 @@
+"""
+Tests multithreading behaviour for reading and
+parsing files for each parser defined in parsers.py
+"""
+from contextlib import ExitStack
+from io import BytesIO
+from multiprocessing.pool import ThreadPool
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import DataFrame
+import pandas._testing as tm
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+
+# We'll probably always skip these for pyarrow
+# Maybe we'll add our own tests for pyarrow too
+pytestmark = [
+    pytest.mark.single_cpu,
+    pytest.mark.slow,
+]
+
+
+@xfail_pyarrow  # ValueError: Found non-unique column index
+def test_multi_thread_string_io_read_csv(all_parsers):
+    # see gh-11786
+    parser = all_parsers
+    max_row_range = 100
+    num_files = 10
+
+    bytes_to_df = (
+        "\n".join([f"{i:d},{i:d},{i:d}" for i in range(max_row_range)]).encode()
+        for _ in range(num_files)
+    )
+
+    # Read all files in many threads.
+    with ExitStack() as stack:
+        files = [stack.enter_context(BytesIO(b)) for b in bytes_to_df]
+
+        pool = stack.enter_context(ThreadPool(8))
+
+        results = pool.map(parser.read_csv, files)
+        first_result = results[0]
+
+        for result in results:
+            tm.assert_frame_equal(first_result, result)
+
+
+def _generate_multi_thread_dataframe(parser, path, num_rows, num_tasks):
+    """
+    Generate a DataFrame via multi-thread.
+
+    Parameters
+    ----------
+    parser : BaseParser
+        The parser object to use for reading the data.
+    path : str
+        The location of the CSV file to read.
+    num_rows : int
+        The number of rows to read per task.
+    num_tasks : int
+        The number of tasks to use for reading this DataFrame.
+
+    Returns
+    -------
+    df : DataFrame
+    """
+
+    def reader(arg):
+        """
+        Create a reader for part of the CSV.
+
+        Parameters
+        ----------
+        arg : tuple
+            A tuple of the following:
+
+            * start : int
+                The row at which to start parsing the CSV.
+            * nrows : int
+                The number of rows to read.
+
+        Returns
+        -------
+        df : DataFrame
+        """
+        start, nrows = arg
+
+        if not start:
+            return parser.read_csv(
+                path, index_col=0, header=0, nrows=nrows, parse_dates=["date"]
+            )
+
+        return parser.read_csv(
+            path,
+            index_col=0,
+            header=None,
+            skiprows=int(start) + 1,
+            nrows=nrows,
+            parse_dates=[9],
+        )
+
+    tasks = [
+        (num_rows * i // num_tasks, num_rows // num_tasks) for i in range(num_tasks)
+    ]
+
+    with ThreadPool(processes=num_tasks) as pool:
+        results = pool.map(reader, tasks)
+
+    header = results[0].columns
+
+    for r in results[1:]:
+        r.columns = header
+
+    final_dataframe = pd.concat(results)
+    return final_dataframe
+
+
+@xfail_pyarrow  # ValueError: The 'nrows' option is not supported
+def test_multi_thread_path_multipart_read_csv(all_parsers):
+    # see gh-11786
+    num_tasks = 4
+    num_rows = 48
+
+    parser = all_parsers
+    file_name = "__thread_pool_reader__.csv"
+    df = DataFrame(
+        {
+            "a": np.random.default_rng(2).random(num_rows),
+            "b": np.random.default_rng(2).random(num_rows),
+            "c": np.random.default_rng(2).random(num_rows),
+            "d": np.random.default_rng(2).random(num_rows),
+            "e": np.random.default_rng(2).random(num_rows),
+            "foo": ["foo"] * num_rows,
+            "bar": ["bar"] * num_rows,
+            "baz": ["baz"] * num_rows,
+            "date": pd.date_range("20000101 09:00:00", periods=num_rows, freq="s"),
+            "int": np.arange(num_rows, dtype="int64"),
+        }
+    )
+
+    with tm.ensure_clean(file_name) as path:
+        df.to_csv(path)
+
+        final_dataframe = _generate_multi_thread_dataframe(
+            parser, path, num_rows, num_tasks
+        )
+        tm.assert_frame_equal(df, final_dataframe)
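Editor's note: a condensed sketch of the thread-pool pattern the module exercises, assuming only pandas and the standard library (the C tokenizer in read_csv releases the GIL while parsing, which is what makes a thread pool worthwhile here):

from io import BytesIO
from multiprocessing.pool import ThreadPool

import pandas as pd

payload = "\n".join(f"{i},{i},{i}" for i in range(100)).encode()
buffers = [BytesIO(payload) for _ in range(10)]  # ten identical in-memory "files"

# Map read_csv over the buffers with eight worker threads.
with ThreadPool(8) as pool:
    frames = pool.map(pd.read_csv, buffers)

# Every thread should have produced the same frame.
assert all(frames[0].equals(df) for df in frames[1:])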
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/parser/test_na_values.py ADDED
@@ -0,0 +1,771 @@
+"""
+Tests that NA values are properly handled during
+parsing for all of the parsers defined in parsers.py
+"""
+from io import StringIO
+
+import numpy as np
+import pytest
+
+from pandas._libs.parsers import STR_NA_VALUES
+
+from pandas import (
+    DataFrame,
+    Index,
+    MultiIndex,
+)
+import pandas._testing as tm
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
+)
+
+xfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")
+skip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")
+
+
+def test_string_nas(all_parsers):
+    parser = all_parsers
+    data = """A,B,C
+a,b,c
+d,,f
+,g,h
+"""
+    result = parser.read_csv(StringIO(data))
+    expected = DataFrame(
+        [["a", "b", "c"], ["d", np.nan, "f"], [np.nan, "g", "h"]],
+        columns=["A", "B", "C"],
+    )
+    if parser.engine == "pyarrow":
+        expected.loc[2, "A"] = None
+        expected.loc[1, "B"] = None
+    tm.assert_frame_equal(result, expected)
+
+
+def test_detect_string_na(all_parsers):
+    parser = all_parsers
+    data = """A,B
+foo,bar
+NA,baz
+NaN,nan
+"""
+    expected = DataFrame(
+        [["foo", "bar"], [np.nan, "baz"], [np.nan, np.nan]], columns=["A", "B"]
+    )
+    if parser.engine == "pyarrow":
+        expected.loc[[1, 2], "A"] = None
+        expected.loc[2, "B"] = None
+    result = parser.read_csv(StringIO(data))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "na_values",
+    [
+        ["-999.0", "-999"],
+        [-999, -999.0],
+        [-999.0, -999],
+        ["-999.0"],
+        ["-999"],
+        [-999.0],
+        [-999],
+    ],
+)
+@pytest.mark.parametrize(
+    "data",
+    [
+        """A,B
+-999,1.2
+2,-999
+3,4.5
+""",
+        """A,B
+-999,1.200
+2,-999.000
+3,4.500
+""",
+    ],
+)
+def test_non_string_na_values(all_parsers, data, na_values, request):
+    # see gh-3611: with an odd float format, we can't match
+    # the string "999.0" exactly but still need float matching
+    parser = all_parsers
+    expected = DataFrame([[np.nan, 1.2], [2.0, np.nan], [3.0, 4.5]], columns=["A", "B"])
+
+    if parser.engine == "pyarrow" and not all(isinstance(x, str) for x in na_values):
+        msg = "The 'pyarrow' engine requires all na_values to be strings"
+        with pytest.raises(TypeError, match=msg):
+            parser.read_csv(StringIO(data), na_values=na_values)
+        return
+    elif parser.engine == "pyarrow" and "-999.000" in data:
+        # because the pyarrow engine does not include the float-ified version
+        # of "-999" -> -999, it does not match the entry with the trailing
+        # zeros, so "-999.000" is not treated as null.
+        mark = pytest.mark.xfail(
+            reason="pyarrow engine does not recognize equivalent floats"
+        )
+        request.applymarker(mark)
+
+    result = parser.read_csv(StringIO(data), na_values=na_values)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_default_na_values(all_parsers):
+    _NA_VALUES = {
+        "-1.#IND",
+        "1.#QNAN",
+        "1.#IND",
+        "-1.#QNAN",
+        "#N/A",
+        "N/A",
+        "n/a",
+        "NA",
+        "<NA>",
+        "#NA",
+        "NULL",
+        "null",
+        "NaN",
+        "nan",
+        "-NaN",
+        "-nan",
+        "#N/A N/A",
+        "",
+        "None",
+    }
+    assert _NA_VALUES == STR_NA_VALUES
+
+    parser = all_parsers
+    nv = len(_NA_VALUES)
+
+    def f(i, v):
+        if i == 0:
+            buf = ""
+        elif i > 0:
+            buf = "".join([","] * i)
+
+        buf = f"{buf}{v}"
+
+        if i < nv - 1:
+            joined = "".join([","] * (nv - i - 1))
+            buf = f"{buf}{joined}"
+
+        return buf
+
+    data = StringIO("\n".join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
+    expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
+
+    result = parser.read_csv(data, header=None)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("na_values", ["baz", ["baz"]])
+def test_custom_na_values(all_parsers, na_values):
+    parser = all_parsers
+    data = """A,B,C
+ignore,this,row
+1,NA,3
+-1.#IND,5,baz
+7,8,NaN
+"""
+    expected = DataFrame(
+        [[1.0, np.nan, 3], [np.nan, 5, np.nan], [7, 8, np.nan]], columns=["A", "B", "C"]
+    )
+    if parser.engine == "pyarrow":
+        msg = "skiprows argument must be an integer when using engine='pyarrow'"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1])
+        return
+
+    result = parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1])
+    tm.assert_frame_equal(result, expected)
+
+
+def test_bool_na_values(all_parsers):
+    data = """A,B,C
+True,False,True
+NA,True,False
+False,NA,True"""
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data))
+    expected = DataFrame(
+        {
+            "A": np.array([True, np.nan, False], dtype=object),
+            "B": np.array([False, True, np.nan], dtype=object),
+            "C": [True, False, True],
+        }
+    )
+    if parser.engine == "pyarrow":
+        expected.loc[1, "A"] = None
+        expected.loc[2, "B"] = None
+    tm.assert_frame_equal(result, expected)
+
+
+def test_na_value_dict(all_parsers):
+    data = """A,B,C
+foo,bar,NA
+bar,foo,foo
+foo,bar,NA
+bar,foo,foo"""
+    parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]})
+        return
+
+    df = parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]})
+    expected = DataFrame(
+        {
+            "A": [np.nan, "bar", np.nan, "bar"],
+            "B": [np.nan, "foo", np.nan, "foo"],
+            "C": [np.nan, "foo", np.nan, "foo"],
+        }
+    )
+    tm.assert_frame_equal(df, expected)
+
+
+@pytest.mark.parametrize(
+    "index_col,expected",
+    [
+        (
+            [0],
+            DataFrame({"b": [np.nan], "c": [1], "d": [5]}, index=Index([0], name="a")),
+        ),
+        (
+            [0, 2],
+            DataFrame(
+                {"b": [np.nan], "d": [5]},
+                index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),
+            ),
+        ),
+        (
+            ["a", "c"],
+            DataFrame(
+                {"b": [np.nan], "d": [5]},
+                index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),
+            ),
+        ),
+    ],
+)
+def test_na_value_dict_multi_index(all_parsers, index_col, expected):
+    data = """\
+a,b,c,d
+0,NA,1,5
+"""
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data), na_values=set(), index_col=index_col)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "kwargs,expected",
+    [
+        (
+            {},
+            DataFrame(
+                {
+                    "A": ["a", "b", np.nan, "d", "e", np.nan, "g"],
+                    "B": [1, 2, 3, 4, 5, 6, 7],
+                    "C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],
+                }
+            ),
+        ),
+        (
+            {"na_values": {"A": [], "C": []}, "keep_default_na": False},
+            DataFrame(
+                {
+                    "A": ["a", "b", "", "d", "e", "nan", "g"],
+                    "B": [1, 2, 3, 4, 5, 6, 7],
+                    "C": ["one", "two", "three", "nan", "five", "", "seven"],
+                }
+            ),
+        ),
+        (
+            {"na_values": ["a"], "keep_default_na": False},
+            DataFrame(
+                {
+                    "A": [np.nan, "b", "", "d", "e", "nan", "g"],
+                    "B": [1, 2, 3, 4, 5, 6, 7],
+                    "C": ["one", "two", "three", "nan", "five", "", "seven"],
+                }
+            ),
+        ),
+        (
+            {"na_values": {"A": [], "C": []}},
+            DataFrame(
+                {
+                    "A": ["a", "b", np.nan, "d", "e", np.nan, "g"],
+                    "B": [1, 2, 3, 4, 5, 6, 7],
+                    "C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],
+                }
+            ),
+        ),
+    ],
+)
+def test_na_values_keep_default(all_parsers, kwargs, expected, request):
+    data = """\
+A,B,C
+a,1,one
+b,2,two
+,3,three
+d,4,nan
+e,5,five
+nan,6,
+g,7,seven
+"""
+    parser = all_parsers
+    if parser.engine == "pyarrow":
+        if "na_values" in kwargs and isinstance(kwargs["na_values"], dict):
+            msg = "The pyarrow engine doesn't support passing a dict for na_values"
+            with pytest.raises(ValueError, match=msg):
+                parser.read_csv(StringIO(data), **kwargs)
+            return
+        mark = pytest.mark.xfail()
+        request.applymarker(mark)
+
+    result = parser.read_csv(StringIO(data), **kwargs)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_no_na_values_no_keep_default(all_parsers):
+    # see gh-4318: passing na_values=None and
+    # keep_default_na=False yields "None" as an NA value
+    data = """\
+A,B,C
+a,1,None
+b,2,two
+,3,None
+d,4,nan
+e,5,five
+nan,6,
+g,7,seven
+"""
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data), keep_default_na=False)
+
+    expected = DataFrame(
+        {
+            "A": ["a", "b", "", "d", "e", "nan", "g"],
+            "B": [1, 2, 3, 4, 5, 6, 7],
+            "C": ["None", "two", "None", "nan", "five", "", "seven"],
+        }
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_no_keep_default_na_dict_na_values(all_parsers):
+    # see gh-19227
+    data = "a,b\n,2"
+    parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data), na_values={"b": ["2"]}, keep_default_na=False
+            )
+        return
+
+    result = parser.read_csv(
+        StringIO(data), na_values={"b": ["2"]}, keep_default_na=False
+    )
+    expected = DataFrame({"a": [""], "b": [np.nan]})
+    tm.assert_frame_equal(result, expected)
+
+
+def test_no_keep_default_na_dict_na_scalar_values(all_parsers):
+    # see gh-19227
+    #
+    # Scalar values shouldn't cause the parsing to crash or fail.
+    data = "a,b\n1,2"
+    parser = all_parsers
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False)
+        return
+
+    df = parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False)
+    expected = DataFrame({"a": [1], "b": [np.nan]})
+    tm.assert_frame_equal(df, expected)
+
+
+@pytest.mark.parametrize("col_zero_na_values", [113125, "113125"])
+def test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_values):
+    # see gh-19227
+    data = """\
+113125,"blah","/blaha",kjsdkj,412.166,225.874,214.008
+729639,"qwer","",asdfkj,466.681,,252.373
+"""
+    parser = all_parsers
+    expected = DataFrame(
+        {
+            0: [np.nan, 729639.0],
+            1: [np.nan, "qwer"],
+            2: ["/blaha", np.nan],
+            3: ["kjsdkj", "asdfkj"],
+            4: [412.166, 466.681],
+            5: ["225.874", ""],
+            6: [np.nan, 252.373],
+        }
+    )
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data),
+                header=None,
+                keep_default_na=False,
+                na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values},
+            )
+        return
+
+    result = parser.read_csv(
+        StringIO(data),
+        header=None,
+        keep_default_na=False,
+        na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values},
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # mismatched dtypes in both cases, FutureWarning in the True case
+@pytest.mark.parametrize(
+    "na_filter,row_data",
+    [
+        (True, [[1, "A"], [np.nan, np.nan], [3, "C"]]),
+        (False, [["1", "A"], ["nan", "B"], ["3", "C"]]),
+    ],
+)
+def test_na_values_na_filter_override(all_parsers, na_filter, row_data):
+    data = """\
+A,B
+1,A
+nan,B
+3,C
+"""
+    parser = all_parsers
+    result = parser.read_csv(StringIO(data), na_values=["B"], na_filter=na_filter)
+
+    expected = DataFrame(row_data, columns=["A", "B"])
+    tm.assert_frame_equal(result, expected)
+
+
+@skip_pyarrow  # CSV parse error: Expected 8 columns, got 5:
+def test_na_trailing_columns(all_parsers):
+    parser = all_parsers
+    data = """Date,Currency,Symbol,Type,Units,UnitPrice,Cost,Tax
+2012-03-14,USD,AAPL,BUY,1000
+2012-05-12,USD,SBUX,SELL,500"""
+
+    # Trailing columns should be all NaN.
+    result = parser.read_csv(StringIO(data))
+    expected = DataFrame(
+        [
+            ["2012-03-14", "USD", "AAPL", "BUY", 1000, np.nan, np.nan, np.nan],
+            ["2012-05-12", "USD", "SBUX", "SELL", 500, np.nan, np.nan, np.nan],
+        ],
+        columns=[
+            "Date",
+            "Currency",
+            "Symbol",
+            "Type",
+            "Units",
+            "UnitPrice",
+            "Cost",
+            "Tax",
+        ],
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "na_values,row_data",
+    [
+        (1, [[np.nan, 2.0], [2.0, np.nan]]),
+        ({"a": 2, "b": 1}, [[1.0, 2.0], [np.nan, np.nan]]),
+    ],
+)
+def test_na_values_scalar(all_parsers, na_values, row_data):
+    # see gh-12224
+    parser = all_parsers
+    names = ["a", "b"]
+    data = "1,2\n2,1"
+
+    if parser.engine == "pyarrow" and isinstance(na_values, dict):
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), names=names, na_values=na_values)
+        return
+    elif parser.engine == "pyarrow":
+        msg = "The 'pyarrow' engine requires all na_values to be strings"
+        with pytest.raises(TypeError, match=msg):
+            parser.read_csv(StringIO(data), names=names, na_values=na_values)
+        return
+
+    result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
+    expected = DataFrame(row_data, columns=names)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_na_values_dict_aliasing(all_parsers):
+    parser = all_parsers
+    na_values = {"a": 2, "b": 1}
+    na_values_copy = na_values.copy()
+
+    names = ["a", "b"]
+    data = "1,2\n2,1"
+
+    expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), names=names, na_values=na_values)
+        return
+
+    result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
+
+    tm.assert_frame_equal(result, expected)
+    tm.assert_dict_equal(na_values, na_values_copy)
+
+
+def test_na_values_dict_col_index(all_parsers):
+    # see gh-14203
+    data = "a\nfoo\n1"
+    parser = all_parsers
+    na_values = {0: "foo"}
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(StringIO(data), na_values=na_values)
+        return
+
+    result = parser.read_csv(StringIO(data), na_values=na_values)
+    expected = DataFrame({"a": [np.nan, 1]})
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "data,kwargs,expected",
+    [
+        (
+            str(2**63) + "\n" + str(2**63 + 1),
+            {"na_values": [2**63]},
+            DataFrame([str(2**63), str(2**63 + 1)]),
+        ),
+        (str(2**63) + ",1" + "\n,2", {}, DataFrame([[str(2**63), 1], ["", 2]])),
+        (str(2**63) + "\n1", {"na_values": [2**63]}, DataFrame([np.nan, 1])),
+    ],
+)
+def test_na_values_uint64(all_parsers, data, kwargs, expected, request):
+    # see gh-14983
+    parser = all_parsers
+
+    if parser.engine == "pyarrow" and "na_values" in kwargs:
+        msg = "The 'pyarrow' engine requires all na_values to be strings"
+        with pytest.raises(TypeError, match=msg):
+            parser.read_csv(StringIO(data), header=None, **kwargs)
+        return
+    elif parser.engine == "pyarrow":
+        mark = pytest.mark.xfail(reason="Returns float64 instead of object")
+        request.applymarker(mark)
+
+    result = parser.read_csv(StringIO(data), header=None, **kwargs)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_empty_na_values_no_default_with_index(all_parsers):
+    # see gh-15835
+    data = "a,1\nb,2"
+    parser = all_parsers
+    expected = DataFrame({"1": [2]}, index=Index(["b"], name="a"))
+
+    result = parser.read_csv(StringIO(data), index_col=0, keep_default_na=False)
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "na_filter,index_data", [(False, ["", "5"]), (True, [np.nan, 5.0])]
+)
+def test_no_na_filter_on_index(all_parsers, na_filter, index_data, request):
+    # see gh-5239
+    #
+    # Don't parse NA-values in index unless na_filter=True
+    parser = all_parsers
+    data = "a,b,c\n1,,3\n4,5,6"
+
+    if parser.engine == "pyarrow" and na_filter is False:
+        mark = pytest.mark.xfail(reason="mismatched index result")
+        request.applymarker(mark)
+
+    expected = DataFrame({"a": [1, 4], "c": [3, 6]}, index=Index(index_data, name="b"))
+    result = parser.read_csv(StringIO(data), index_col=[1], na_filter=na_filter)
+    tm.assert_frame_equal(result, expected)
+
+
+def test_inf_na_values_with_int_index(all_parsers):
+    # see gh-17128
+    parser = all_parsers
+    data = "idx,col1,col2\n1,3,4\n2,inf,-inf"
+
+    # Don't fail with OverflowError with inf's and integer index column.
+    out = parser.read_csv(StringIO(data), index_col=[0], na_values=["inf", "-inf"])
+    expected = DataFrame(
+        {"col1": [3, np.nan], "col2": [4, np.nan]}, index=Index([1, 2], name="idx")
+    )
+    tm.assert_frame_equal(out, expected)
+
+
+@xfail_pyarrow  # mismatched shape
+@pytest.mark.parametrize("na_filter", [True, False])
+def test_na_values_with_dtype_str_and_na_filter(all_parsers, na_filter):
+    # see gh-20377
+    parser = all_parsers
+    data = "a,b,c\n1,,3\n4,5,6"
+
+    # na_filter=True --> missing value becomes NaN.
+    # na_filter=False --> missing value remains empty string.
+    empty = np.nan if na_filter else ""
+    expected = DataFrame({"a": ["1", "4"], "b": [empty, "5"], "c": ["3", "6"]})
+
+    result = parser.read_csv(StringIO(data), na_filter=na_filter, dtype=str)
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # mismatched exception message
+@pytest.mark.parametrize(
+    "data, na_values",
+    [
+        ("false,1\n,1\ntrue", None),
+        ("false,1\nnull,1\ntrue", None),
+        ("false,1\nnan,1\ntrue", None),
+        ("false,1\nfoo,1\ntrue", "foo"),
+        ("false,1\nfoo,1\ntrue", ["foo"]),
+        ("false,1\nfoo,1\ntrue", {"a": "foo"}),
+    ],
+)
+def test_cast_NA_to_bool_raises_error(all_parsers, data, na_values):
+    parser = all_parsers
+    msg = "|".join(
+        [
+            "Bool column has NA values in column [0a]",
+            "cannot safely convert passed user dtype of "
+            "bool for object dtyped data in column 0",
+        ]
+    )
+
+    with pytest.raises(ValueError, match=msg):
+        parser.read_csv(
+            StringIO(data),
+            header=None,
+            names=["a", "b"],
+            dtype={"a": "bool"},
+            na_values=na_values,
+        )
+
+
+# TODO: this test isn't about the na_values keyword, it is about the empty entries
+# being returned with NaN entries, whereas the pyarrow engine returns "nan"
+@xfail_pyarrow  # mismatched shapes
+def test_str_nan_dropped(all_parsers):
+    # see gh-21131
+    parser = all_parsers
+
+    data = """File: small.csv,,
+10010010233,0123,654
+foo,,bar
+01001000155,4530,898"""
+
+    result = parser.read_csv(
+        StringIO(data),
+        header=None,
+        names=["col1", "col2", "col3"],
+        dtype={"col1": str, "col2": str, "col3": str},
+    ).dropna()
+
+    expected = DataFrame(
+        {
+            "col1": ["10010010233", "01001000155"],
+            "col2": ["0123", "4530"],
+            "col3": ["654", "898"],
+        },
+        index=[1, 3],
+    )
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_nan_multi_index(all_parsers):
+    # GH 42446
+    parser = all_parsers
+    data = "A,B,B\nX,Y,Z\n1,2,inf"
+
+    if parser.engine == "pyarrow":
+        msg = "The pyarrow engine doesn't support passing a dict for na_values"
+        with pytest.raises(ValueError, match=msg):
+            parser.read_csv(
+                StringIO(data), header=list(range(2)), na_values={("B", "Z"): "inf"}
+            )
+        return
+
+    result = parser.read_csv(
+        StringIO(data), header=list(range(2)), na_values={("B", "Z"): "inf"}
+    )
+
+    expected = DataFrame(
+        {
+            ("A", "X"): [1],
+            ("B", "Y"): [2],
+            ("B", "Z"): [np.nan],
+        }
+    )
+
+    tm.assert_frame_equal(result, expected)
+
+
+@xfail_pyarrow  # Failed: DID NOT RAISE <class 'ValueError'>; it casts the NaN to False
+def test_bool_and_nan_to_bool(all_parsers):
+    # GH#42808
+    parser = all_parsers
+    data = """0
+NaN
+True
+False
+"""
+    with pytest.raises(ValueError, match="NA values"):
+        parser.read_csv(StringIO(data), dtype="bool")
+
+
+def test_bool_and_nan_to_int(all_parsers):
+    # GH#42808
+    parser = all_parsers
+    data = """0
+NaN
+True
+False
+"""
+    with pytest.raises(ValueError, match="convert|NoneType"):
+        parser.read_csv(StringIO(data), dtype="int")
+
+
+def test_bool_and_nan_to_float(all_parsers):
+    # GH#42808
+    parser = all_parsers
+    data = """0
+NaN
+True
+False
+"""
+    result = parser.read_csv(StringIO(data), dtype="float")
+    expected = DataFrame.from_dict({"0": [np.nan, 1.0, 0.0]})
+    tm.assert_frame_equal(result, expected)
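Editor's note: finally, a sketch of the na_values knobs these tests cover, using plain read_csv calls (the dict form is per column and, as several tests above assert, is rejected by the pyarrow engine):

from io import StringIO

import pandas as pd

data = "A,B\nfoo,-999\nNA,1.5"

default = pd.read_csv(StringIO(data))                       # "NA" -> NaN by default
extra = pd.read_csv(StringIO(data), na_values=["-999"])     # add a sentinel globally
per_col = pd.read_csv(StringIO(data), na_values={"B": [-999]})  # per-column sentinels

# keep_default_na=False turns off the built-in sentinel list entirely,
# so "NA" survives as a literal string.
literal = pd.read_csv(StringIO(data), keep_default_na=False)
print(literal["A"].tolist())  # ['foo', 'NA']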