applied-ai-018 committed on
Commit
e47104f
·
verified ·
1 Parent(s): df69a1a

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/__init__.py +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__init__.py +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/__init__.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_fillna.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_insert.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_repeat.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_shift.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_astype.py +176 -0
  9. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_insert.py +145 -0
  10. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_repeat.py +34 -0
  11. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py +76 -0
  12. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_ops.py +14 -0
  13. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_searchsorted.py +28 -0
  14. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_setops.py +254 -0
  15. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py +173 -0
  16. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py +0 -0
  17. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/__init__.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_crosstab.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_cut.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_from_dummies.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_get_dummies.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_melt.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot_multilevel.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_qcut.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_union_categoricals.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_util.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__init__.py +0 -0
  29. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py +7 -0
  42. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py +389 -0
  43. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py +753 -0
  44. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py +273 -0
  45. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py +912 -0
  46. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py +230 -0
  47. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py +606 -0
  48. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py +295 -0
  49. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py +472 -0
  50. env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py +54 -0
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (204 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_fillna.cpython-310.pyc ADDED
Binary file (925 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_insert.cpython-310.pyc ADDED
Binary file (4.12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_repeat.cpython-310.pyc ADDED
Binary file (1.01 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_shift.cpython-310.pyc ADDED
Binary file (2.71 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_astype.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import timedelta
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ Index,
9
+ NaT,
10
+ Timedelta,
11
+ TimedeltaIndex,
12
+ timedelta_range,
13
+ )
14
+ import pandas._testing as tm
15
+ from pandas.core.arrays import TimedeltaArray
16
+
17
+
18
class TestTimedeltaIndex:
    """Casting behavior (``astype``) of TimedeltaIndex and TimedeltaArray."""

    def test_astype_object(self):
        # Object-dtype conversion yields an Index of Timedelta scalars.
        tdi = timedelta_range(start="1 days", periods=4, freq="D", name="idx")
        scalars = [
            Timedelta("1 days"),
            Timedelta("2 days"),
            Timedelta("3 days"),
            Timedelta("4 days"),
        ]
        res = tdi.astype(object)
        exp = Index(scalars, dtype=object, name="idx")
        tm.assert_index_equal(res, exp)
        assert tdi.tolist() == scalars

    def test_astype_object_with_nat(self):
        # NaT entries survive the cast to object dtype unchanged.
        tdi = TimedeltaIndex(
            [timedelta(days=1), timedelta(days=2), NaT, timedelta(days=4)], name="idx"
        )
        scalars = [
            Timedelta("1 days"),
            Timedelta("2 days"),
            NaT,
            Timedelta("4 days"),
        ]
        res = tdi.astype(object)
        exp = Index(scalars, dtype=object, name="idx")
        tm.assert_index_equal(res, exp)
        assert tdi.tolist() == scalars

    def test_astype(self):
        # GH 13149, GH 13209
        tdi = TimedeltaIndex([1e14, "NaT", NaT, np.nan], name="idx")

        res = tdi.astype(object)
        exp = Index(
            [Timedelta("1 days 03:46:40")] + [NaT] * 3, dtype=object, name="idx"
        )
        tm.assert_index_equal(res, exp)

        # int64 view: NaT maps to the int64 minimum sentinel
        res = tdi.astype(np.int64)
        exp = Index(
            [100000000000000] + [-9223372036854775808] * 3, dtype=np.int64, name="idx"
        )
        tm.assert_index_equal(res, exp)

        res = tdi.astype(str)
        exp = Index([str(x) for x in tdi], name="idx", dtype=object)
        tm.assert_index_equal(res, exp)

        rng = timedelta_range("1 days", periods=10)
        res = rng.astype("i8")
        tm.assert_index_equal(res, Index(rng.asi8))
        tm.assert_numpy_array_equal(rng.asi8, res.values)

    def test_astype_uint(self):
        # Unsigned casts are rejected; the error points users at int64.
        arr = timedelta_range("1h", periods=2)

        with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
            arr.astype("uint64")
        with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
            arr.astype("uint32")

    def test_astype_timedelta64(self):
        # GH 13149, GH 13209
        tdi = TimedeltaIndex([1e14, "NaT", NaT, np.nan])

        msg = (
            r"Cannot convert from timedelta64\[ns\] to timedelta64. "
            "Supported resolutions are 's', 'ms', 'us', 'ns'"
        )
        with pytest.raises(ValueError, match=msg):
            tdi.astype("timedelta64")

        res = tdi.astype("timedelta64[ns]")
        tm.assert_index_equal(res, tdi)
        assert res is not tdi

        # copy=False on a no-op cast hands back the same object
        res = tdi.astype("timedelta64[ns]", copy=False)
        tm.assert_index_equal(res, tdi)
        assert res is tdi

    def test_astype_to_td64d_raises(self, index_or_series):
        # We don't support "D" reso
        scalar = Timedelta(days=31)
        td = index_or_series(
            [scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT],
            dtype="m8[ns]",
        )
        msg = (
            r"Cannot convert from timedelta64\[ns\] to timedelta64\[D\]. "
            "Supported resolutions are 's', 'ms', 'us', 'ns'"
        )
        with pytest.raises(ValueError, match=msg):
            td.astype("timedelta64[D]")

    def test_astype_ms_to_s(self, index_or_series):
        scalar = Timedelta(days=31)
        td = index_or_series(
            [scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT],
            dtype="m8[ns]",
        )

        exp_values = np.asarray(td).astype("m8[s]")
        exp_tda = TimedeltaArray._simple_new(exp_values, dtype=exp_values.dtype)
        exp = index_or_series(exp_tda)
        assert exp.dtype == "m8[s]"
        res = td.astype("timedelta64[s]")
        tm.assert_equal(res, exp)

    def test_astype_freq_conversion(self):
        # pre-2.0 td64 astype converted to float64. now for supported units
        # (s, ms, us, ns) this converts to the requested dtype.
        # This matches TDA and Series
        tdi = timedelta_range("1 Day", periods=30)

        res = tdi.astype("m8[s]")
        exp_values = np.asarray(tdi).astype("m8[s]")
        exp_tda = TimedeltaArray._simple_new(
            exp_values, dtype=exp_values.dtype, freq=tdi.freq
        )
        exp = Index(exp_tda)
        assert exp.dtype == "m8[s]"
        tm.assert_index_equal(res, exp)

        # check this matches Series and TimedeltaArray
        res = tdi._data.astype("m8[s]")
        tm.assert_equal(res, exp._values)

        res = tdi.to_series().astype("m8[s]")
        tm.assert_equal(res._values, exp._values._with_freq(None))

    @pytest.mark.parametrize("dtype", [float, "datetime64", "datetime64[ns]"])
    def test_astype_raises(self, dtype):
        # GH 13149, GH 13209
        tdi = TimedeltaIndex([1e14, "NaT", NaT, np.nan])
        msg = "Cannot cast TimedeltaIndex to dtype"
        with pytest.raises(TypeError, match=msg):
            tdi.astype(dtype)

    def test_astype_category(self):
        obj = timedelta_range("1h", periods=2, freq="h")

        res = obj.astype("category")
        exp = pd.CategoricalIndex([Timedelta("1h"), Timedelta("2h")])
        tm.assert_index_equal(res, exp)

        # The underlying array path should agree with the Index path.
        res = obj._data.astype("category")
        exp = exp.values
        tm.assert_categorical_equal(res, exp)

    def test_astype_array_fallback(self):
        obj = timedelta_range("1h", periods=2)
        res = obj.astype(bool)
        exp = Index(np.array([True, True]))
        tm.assert_index_equal(res, exp)

        res = obj._data.astype(bool)
        exp = np.array([True, True])
        tm.assert_numpy_array_equal(res, exp)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_insert.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import timedelta
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas._libs import lib
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ Index,
11
+ Timedelta,
12
+ TimedeltaIndex,
13
+ timedelta_range,
14
+ )
15
+ import pandas._testing as tm
16
+
17
+
18
class TestTimedeltaIndexInsert:
    """Insertion semantics for TimedeltaIndex: dtype coercion and freq handling."""

    def test_insert(self):
        tdi = TimedeltaIndex(["4day", "1day", "2day"], name="idx")

        res = tdi.insert(2, timedelta(days=5))
        exp = TimedeltaIndex(["4day", "1day", "5day", "2day"], name="idx")
        tm.assert_index_equal(res, exp)

        # insertion of non-datetime should coerce to object index
        res = tdi.insert(1, "inserted")
        exp = Index(
            [Timedelta("4day"), "inserted", Timedelta("1day"), Timedelta("2day")],
            name="idx",
        )
        assert not isinstance(res, TimedeltaIndex)
        tm.assert_index_equal(res, exp)
        assert res.name == exp.name

        tdi = timedelta_range("1day 00:00:01", periods=3, freq="s", name="idx")

        # freq is preserved when the insert extends the regular pattern
        expected_0 = TimedeltaIndex(
            ["1day", "1day 00:00:01", "1day 00:00:02", "1day 00:00:03"],
            name="idx",
            freq="s",
        )
        expected_3 = TimedeltaIndex(
            ["1day 00:00:01", "1day 00:00:02", "1day 00:00:03", "1day 00:00:04"],
            name="idx",
            freq="s",
        )

        # otherwise freq is reset to None
        expected_1_nofreq = TimedeltaIndex(
            ["1day 00:00:01", "1day 00:00:01", "1day 00:00:02", "1day 00:00:03"],
            name="idx",
            freq=None,
        )
        expected_3_nofreq = TimedeltaIndex(
            ["1day 00:00:01", "1day 00:00:02", "1day 00:00:03", "1day 00:00:05"],
            name="idx",
            freq=None,
        )

        cases = [
            (0, Timedelta("1day"), expected_0),
            (-3, Timedelta("1day"), expected_0),
            (3, Timedelta("1day 00:00:04"), expected_3),
            (1, Timedelta("1day 00:00:01"), expected_1_nofreq),
            (3, Timedelta("1day 00:00:05"), expected_3_nofreq),
        ]

        for loc, value, exp in cases:
            res = tdi.insert(loc, value)
            tm.assert_index_equal(res, exp)
            assert res.name == exp.name
            assert res.freq == exp.freq

    @pytest.mark.parametrize(
        "null", [None, np.nan, np.timedelta64("NaT"), pd.NaT, pd.NA]
    )
    def test_insert_nat(self, null):
        # GH 18295 (test missing)
        tdi = timedelta_range("1day", "3day")
        res = tdi.insert(1, null)
        exp = TimedeltaIndex(["1day", pd.NaT, "2day", "3day"])
        tm.assert_index_equal(res, exp)

    def test_insert_invalid_na(self):
        # A dt64 NaT is *not* a valid td64 NA, so we coerce to object.
        tdi = TimedeltaIndex(["4day", "1day", "2day"], name="idx")

        item = np.datetime64("NaT")
        res = tdi.insert(0, item)

        exp = Index([item] + list(tdi), dtype=object, name="idx")
        tm.assert_index_equal(res, exp)

        # Also works if we pass a different dt64nat object
        item2 = np.datetime64("NaT")
        res = tdi.insert(0, item2)
        tm.assert_index_equal(res, exp)

    @pytest.mark.parametrize(
        "item", [0, np.int64(0), np.float64(0), np.array(0), np.datetime64(456, "us")]
    )
    def test_insert_mismatched_types_raises(self, item):
        # GH#33703 dont cast these to td64
        tdi = TimedeltaIndex(["4day", "1day", "2day"], name="idx")

        res = tdi.insert(1, item)

        exp = Index(
            [tdi[0], lib.item_from_zerodim(item)] + list(tdi[1:]),
            dtype=object,
            name="idx",
        )
        tm.assert_index_equal(res, exp)

    def test_insert_castable_str(self):
        # A parseable string is coerced to Timedelta, keeping td64 dtype.
        tdi = timedelta_range("1day", "3day")

        res = tdi.insert(0, "1 Day")

        exp = TimedeltaIndex([tdi[0]] + list(tdi))
        tm.assert_index_equal(res, exp)

    def test_insert_non_castable_str(self):
        # A non-parseable string forces an object-dtype result.
        tdi = timedelta_range("1day", "3day")

        res = tdi.insert(0, "foo")

        exp = Index(["foo"] + list(tdi), dtype=object)
        tm.assert_index_equal(res, exp)

    def test_insert_empty(self):
        # Corner case inserting with length zero doesn't raise IndexError
        # GH#33573 for freq preservation
        tdi = timedelta_range("1 Day", periods=3)
        td = tdi[0]

        res = tdi[:0].insert(0, td)
        assert res.freq == "D"

        with pytest.raises(IndexError, match="loc must be an integer between"):
            tdi[:0].insert(1, td)

        with pytest.raises(IndexError, match="loc must be an integer between"):
            tdi[:0].insert(-1, td)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_repeat.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas import (
4
+ TimedeltaIndex,
5
+ timedelta_range,
6
+ )
7
+ import pandas._testing as tm
8
+
9
+
10
class TestRepeat:
    """``repeat`` on a TimedeltaIndex duplicates entries and drops freq."""

    def test_repeat(self):
        tdi = timedelta_range("1 days", periods=2, freq="D")
        expected = TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"])
        # Index.repeat and np.repeat must agree; freq cannot survive repetition.
        for repeated in (tdi.repeat(2), np.repeat(tdi, 2)):
            tm.assert_index_equal(repeated, expected)
            assert repeated.freq is None

        tdi = TimedeltaIndex(["1 days", "NaT", "3 days"])
        expected = TimedeltaIndex(
            [
                "1 days",
                "1 days",
                "1 days",
                "NaT",
                "NaT",
                "NaT",
                "3 days",
                "3 days",
                "3 days",
            ]
        )
        for repeated in (tdi.repeat(3), np.repeat(tdi, 3)):
            tm.assert_index_equal(repeated, expected)
            assert repeated.freq is None
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas.errors import NullFrequencyError
4
+
5
+ import pandas as pd
6
+ from pandas import TimedeltaIndex
7
+ import pandas._testing as tm
8
+
9
+
10
+ class TestTimedeltaIndexShift:
11
+ # -------------------------------------------------------------
12
+ # TimedeltaIndex.shift is used by __add__/__sub__
13
+
14
+ def test_tdi_shift_empty(self):
15
+ # GH#9903
16
+ idx = TimedeltaIndex([], name="xxx")
17
+ tm.assert_index_equal(idx.shift(0, freq="h"), idx)
18
+ tm.assert_index_equal(idx.shift(3, freq="h"), idx)
19
+
20
+ def test_tdi_shift_hours(self):
21
+ # GH#9903
22
+ idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
23
+ tm.assert_index_equal(idx.shift(0, freq="h"), idx)
24
+ exp = TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx")
25
+ tm.assert_index_equal(idx.shift(3, freq="h"), exp)
26
+ exp = TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx")
27
+ tm.assert_index_equal(idx.shift(-3, freq="h"), exp)
28
+
29
+ def test_tdi_shift_minutes(self):
30
+ # GH#9903
31
+ idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
32
+ tm.assert_index_equal(idx.shift(0, freq="min"), idx)
33
+ exp = TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx")
34
+ tm.assert_index_equal(idx.shift(3, freq="min"), exp)
35
+ exp = TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx")
36
+ tm.assert_index_equal(idx.shift(-3, freq="min"), exp)
37
+
38
+ def test_tdi_shift_int(self):
39
+ # GH#8083
40
+ tdi = pd.to_timedelta(range(5), unit="d")
41
+ trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
42
+ result = trange.shift(1)
43
+ expected = TimedeltaIndex(
44
+ [
45
+ "1 days 01:00:00",
46
+ "2 days 01:00:00",
47
+ "3 days 01:00:00",
48
+ "4 days 01:00:00",
49
+ "5 days 01:00:00",
50
+ ],
51
+ freq="D",
52
+ )
53
+ tm.assert_index_equal(result, expected)
54
+
55
+ def test_tdi_shift_nonstandard_freq(self):
56
+ # GH#8083
57
+ tdi = pd.to_timedelta(range(5), unit="d")
58
+ trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
59
+ result = trange.shift(3, freq="2D 1s")
60
+ expected = TimedeltaIndex(
61
+ [
62
+ "6 days 01:00:03",
63
+ "7 days 01:00:03",
64
+ "8 days 01:00:03",
65
+ "9 days 01:00:03",
66
+ "10 days 01:00:03",
67
+ ],
68
+ freq="D",
69
+ )
70
+ tm.assert_index_equal(result, expected)
71
+
72
+ def test_shift_no_freq(self):
73
+ # GH#19147
74
+ tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
75
+ with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
76
+ tdi.shift(2)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_ops.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pandas import (
2
+ TimedeltaIndex,
3
+ timedelta_range,
4
+ )
5
+ import pandas._testing as tm
6
+
7
+
8
+ class TestTimedeltaIndexOps:
9
+ def test_infer_freq(self, freq_sample):
10
+ # GH#11018
11
+ idx = timedelta_range("1", freq=freq_sample, periods=10)
12
+ result = TimedeltaIndex(idx.asi8, freq="infer")
13
+ tm.assert_index_equal(idx, result)
14
+ assert result.freq == freq_sample
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_searchsorted.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ TimedeltaIndex,
6
+ Timestamp,
7
+ )
8
+ import pandas._testing as tm
9
+
10
+
11
class TestSearchSorted:
    """``searchsorted`` accepts td64-like arguments and rejects others."""

    def test_searchsorted_different_argument_classes(self, listlike_box):
        tdi = TimedeltaIndex(["1 day", "2 days", "3 days"])
        res = tdi.searchsorted(listlike_box(tdi))
        # Searching the index against itself yields the positions 0..n-1.
        exp = np.arange(len(tdi), dtype=res.dtype)
        tm.assert_numpy_array_equal(res, exp)

        # The array-level path must agree with the Index-level one.
        res = tdi._data.searchsorted(listlike_box(tdi))
        tm.assert_numpy_array_equal(res, exp)

    @pytest.mark.parametrize(
        "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2]
    )
    def test_searchsorted_invalid_argument_dtype(self, arg):
        tdi = TimedeltaIndex(["1 day", "2 days", "3 days"])
        msg = "value should be a 'Timedelta', 'NaT', or array of those. Got"
        with pytest.raises(TypeError, match=msg):
            tdi.searchsorted(arg)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_setops.py ADDED
@@ -0,0 +1,254 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ Index,
7
+ TimedeltaIndex,
8
+ timedelta_range,
9
+ )
10
+ import pandas._testing as tm
11
+
12
+ from pandas.tseries.offsets import Hour
13
+
14
+
15
class TestTimedeltaIndex:
    """Union and intersection behavior for TimedeltaIndex."""

    def test_union(self):
        i1 = timedelta_range("1day", periods=5)
        i2 = timedelta_range("3day", periods=5)
        res = i1.union(i2)
        exp = timedelta_range("1day", periods=7)
        tm.assert_index_equal(res, exp)

        i1 = Index(np.arange(0, 20, 2, dtype=np.int64))
        i2 = timedelta_range(start="1 day", periods=10, freq="D")
        i1.union(i2)  # Works
        i2.union(i1)  # Fails with "AttributeError: can't set attribute"

    def test_union_sort_false(self):
        tdi = timedelta_range("1day", periods=5)

        left = tdi[3:]
        right = tdi[:3]

        # Check that we are testing the desired code path
        assert left._can_fast_union(right)

        res = left.union(right)
        tm.assert_index_equal(res, tdi)

        # sort=False keeps concatenation order instead of sorting
        res = left.union(right, sort=False)
        exp = TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"])
        tm.assert_index_equal(res, exp)

    def test_union_coverage(self):
        tdi = TimedeltaIndex(["3d", "1d", "2d"])
        ordered = TimedeltaIndex(tdi.sort_values(), freq="infer")
        res = ordered.union(tdi)
        tm.assert_index_equal(res, ordered)

        # Union with an empty slice preserves freq.
        res = ordered[:0].union(ordered)
        tm.assert_index_equal(res, ordered)
        assert res.freq == ordered.freq

    def test_union_bug_1730(self):
        rng_a = timedelta_range("1 day", periods=4, freq="3h")
        rng_b = timedelta_range("1 day", periods=4, freq="4h")

        res = rng_a.union(rng_b)
        exp = TimedeltaIndex(sorted(set(rng_a) | set(rng_b)))
        tm.assert_index_equal(res, exp)

    def test_union_bug_1745(self):
        left = TimedeltaIndex(["1 day 15:19:49.695000"])
        right = TimedeltaIndex(
            ["2 day 13:04:21.322000", "1 day 15:27:24.873000", "1 day 15:31:05.350000"]
        )

        res = left.union(right)
        exp = TimedeltaIndex(sorted(set(left) | set(right)))
        tm.assert_index_equal(res, exp)

    def test_union_bug_4564(self):
        left = timedelta_range("1 day", "30d")
        right = left + pd.offsets.Minute(15)

        res = left.union(right)
        exp = TimedeltaIndex(sorted(set(left) | set(right)))
        tm.assert_index_equal(res, exp)

    def test_union_freq_infer(self):
        # When taking the union of two TimedeltaIndexes, we infer
        # a freq even if the arguments don't have freq. This matches
        # DatetimeIndex behavior.
        tdi = timedelta_range("1 Day", periods=5)
        left = tdi[[0, 1, 3, 4]]
        right = tdi[[2, 3, 1]]

        assert left.freq is None
        assert right.freq is None

        res = left.union(right)
        tm.assert_index_equal(res, tdi)
        assert res.freq == "D"

    def test_intersection_bug_1708(self):
        index_1 = timedelta_range("1 day", periods=4, freq="h")
        index_2 = index_1 + pd.offsets.Hour(5)

        # Disjoint shifted ranges intersect to empty.
        res = index_1.intersection(index_2)
        assert len(res) == 0

        index_1 = timedelta_range("1 day", periods=4, freq="h")
        index_2 = index_1 + pd.offsets.Hour(1)

        res = index_1.intersection(index_2)
        exp = timedelta_range("1 day 01:00:00", periods=3, freq="h")
        tm.assert_index_equal(res, exp)
        assert res.freq == exp.freq

    def test_intersection_equal(self, sort):
        # GH 24471 Test intersection outcome given the sort keyword
        # for equal indices intersection should return the original index
        first = timedelta_range("1 day", periods=4, freq="h")
        second = timedelta_range("1 day", periods=4, freq="h")
        intersect = first.intersection(second, sort=sort)
        if sort is None:
            tm.assert_index_equal(intersect, second.sort_values())
        tm.assert_index_equal(intersect, second)

        # Corner cases
        inter = first.intersection(first, sort=sort)
        assert inter is first

    @pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)])
    def test_intersection_zero_length(self, period_1, period_2, sort):
        # GH 24471 test for non overlap the intersection should be zero length
        index_1 = timedelta_range("1 day", periods=period_1, freq="h")
        index_2 = timedelta_range("1 day", periods=period_2, freq="h")
        exp = timedelta_range("1 day", periods=0, freq="h")
        res = index_1.intersection(index_2, sort=sort)
        tm.assert_index_equal(res, exp)

    def test_zero_length_input_index(self, sort):
        # GH 24966 test for 0-len intersections are copied
        index_1 = timedelta_range("1 day", periods=0, freq="h")
        index_2 = timedelta_range("1 day", periods=3, freq="h")
        res = index_1.intersection(index_2, sort=sort)
        assert index_1 is not res
        assert index_2 is not res
        tm.assert_copy(res, index_1)

    @pytest.mark.parametrize(
        "rng, expected",
        # if target has the same name, it is preserved
        [
            (
                timedelta_range("1 day", periods=5, freq="h", name="idx"),
                timedelta_range("1 day", periods=4, freq="h", name="idx"),
            ),
            # if target name is different, it will be reset
            (
                timedelta_range("1 day", periods=5, freq="h", name="other"),
                timedelta_range("1 day", periods=4, freq="h", name=None),
            ),
            # if no overlap exists return empty index
            (
                timedelta_range("1 day", periods=10, freq="h", name="idx")[5:],
                TimedeltaIndex([], freq="h", name="idx"),
            ),
        ],
    )
    def test_intersection(self, rng, expected, sort):
        # GH 4690 (with tz)
        base = timedelta_range("1 day", periods=4, freq="h", name="idx")
        res = base.intersection(rng, sort=sort)
        if sort is None:
            expected = expected.sort_values()
        tm.assert_index_equal(res, expected)
        assert res.name == expected.name
        assert res.freq == expected.freq

    @pytest.mark.parametrize(
        "rng, expected",
        # part intersection works
        [
            (
                TimedeltaIndex(["5 hour", "2 hour", "4 hour", "9 hour"], name="idx"),
                TimedeltaIndex(["2 hour", "4 hour"], name="idx"),
            ),
            # reordered part intersection
            (
                TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"),
                TimedeltaIndex(["1 hour", "2 hour"], name=None),
            ),
            # reversed index
            (
                TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")[
                    ::-1
                ],
                TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx"),
            ),
        ],
    )
    def test_intersection_non_monotonic(self, rng, expected, sort):
        # 24471 non-monotonic
        base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")
        res = base.intersection(rng, sort=sort)
        if sort is None:
            expected = expected.sort_values()
        tm.assert_index_equal(res, expected)
        assert res.name == expected.name

        # if reversed order, frequency is still the same
        if all(base == rng[::-1]) and sort is None:
            assert isinstance(res.freq, Hour)
        else:
            assert res.freq is None
208
+
209
+
210
+ class TestTimedeltaIndexDifference:
211
+ def test_difference_freq(self, sort):
212
+ # GH14323: Difference of TimedeltaIndex should not preserve frequency
213
+
214
+ index = timedelta_range("0 days", "5 days", freq="D")
215
+
216
+ other = timedelta_range("1 days", "4 days", freq="D")
217
+ expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
218
+ idx_diff = index.difference(other, sort)
219
+ tm.assert_index_equal(idx_diff, expected)
220
+ tm.assert_attr_equal("freq", idx_diff, expected)
221
+
222
+ # preserve frequency when the difference is a contiguous
223
+ # subset of the original range
224
+ other = timedelta_range("2 days", "5 days", freq="D")
225
+ idx_diff = index.difference(other, sort)
226
+ expected = TimedeltaIndex(["0 days", "1 days"], freq="D")
227
+ tm.assert_index_equal(idx_diff, expected)
228
+ tm.assert_attr_equal("freq", idx_diff, expected)
229
+
230
+ def test_difference_sort(self, sort):
231
+ index = TimedeltaIndex(
232
+ ["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"]
233
+ )
234
+
235
+ other = timedelta_range("1 days", "4 days", freq="D")
236
+ idx_diff = index.difference(other, sort)
237
+
238
+ expected = TimedeltaIndex(["5 days", "0 days"], freq=None)
239
+
240
+ if sort is None:
241
+ expected = expected.sort_values()
242
+
243
+ tm.assert_index_equal(idx_diff, expected)
244
+ tm.assert_attr_equal("freq", idx_diff, expected)
245
+
246
+ other = timedelta_range("2 days", "5 days", freq="D")
247
+ idx_diff = index.difference(other, sort)
248
+ expected = TimedeltaIndex(["1 days", "0 days"], freq=None)
249
+
250
+ if sort is None:
251
+ expected = expected.sort_values()
252
+
253
+ tm.assert_index_equal(idx_diff, expected)
254
+ tm.assert_attr_equal("freq", idx_diff, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ Timedelta,
6
+ TimedeltaIndex,
7
+ timedelta_range,
8
+ to_timedelta,
9
+ )
10
+ import pandas._testing as tm
11
+
12
+ from pandas.tseries.offsets import (
13
+ Day,
14
+ Second,
15
+ )
16
+
17
+
18
+ class TestTimedeltas:
19
+ def test_timedelta_range_unit(self):
20
+ # GH#49824
21
+ tdi = timedelta_range("0 Days", periods=10, freq="100000D", unit="s")
22
+ exp_arr = (np.arange(10, dtype="i8") * 100_000).view("m8[D]").astype("m8[s]")
23
+ tm.assert_numpy_array_equal(tdi.to_numpy(), exp_arr)
24
+
25
+ def test_timedelta_range(self):
26
+ expected = to_timedelta(np.arange(5), unit="D")
27
+ result = timedelta_range("0 days", periods=5, freq="D")
28
+ tm.assert_index_equal(result, expected)
29
+
30
+ expected = to_timedelta(np.arange(11), unit="D")
31
+ result = timedelta_range("0 days", "10 days", freq="D")
32
+ tm.assert_index_equal(result, expected)
33
+
34
+ expected = to_timedelta(np.arange(5), unit="D") + Second(2) + Day()
35
+ result = timedelta_range("1 days, 00:00:02", "5 days, 00:00:02", freq="D")
36
+ tm.assert_index_equal(result, expected)
37
+
38
+ expected = to_timedelta([1, 3, 5, 7, 9], unit="D") + Second(2)
39
+ result = timedelta_range("1 days, 00:00:02", periods=5, freq="2D")
40
+ tm.assert_index_equal(result, expected)
41
+
42
+ expected = to_timedelta(np.arange(50), unit="min") * 30
43
+ result = timedelta_range("0 days", freq="30min", periods=50)
44
+ tm.assert_index_equal(result, expected)
45
+
46
+ @pytest.mark.parametrize(
47
+ "depr_unit, unit",
48
+ [
49
+ ("H", "hour"),
50
+ ("T", "minute"),
51
+ ("t", "minute"),
52
+ ("S", "second"),
53
+ ("L", "millisecond"),
54
+ ("l", "millisecond"),
55
+ ("U", "microsecond"),
56
+ ("u", "microsecond"),
57
+ ("N", "nanosecond"),
58
+ ("n", "nanosecond"),
59
+ ],
60
+ )
61
+ def test_timedelta_units_H_T_S_L_U_N_deprecated(self, depr_unit, unit):
62
+ # GH#52536
63
+ depr_msg = (
64
+ f"'{depr_unit}' is deprecated and will be removed in a future version."
65
+ )
66
+
67
+ expected = to_timedelta(np.arange(5), unit=unit)
68
+ with tm.assert_produces_warning(FutureWarning, match=depr_msg):
69
+ result = to_timedelta(np.arange(5), unit=depr_unit)
70
+ tm.assert_index_equal(result, expected)
71
+
72
+ @pytest.mark.parametrize(
73
+ "periods, freq", [(3, "2D"), (5, "D"), (6, "19h12min"), (7, "16h"), (9, "12h")]
74
+ )
75
+ def test_linspace_behavior(self, periods, freq):
76
+ # GH 20976
77
+ result = timedelta_range(start="0 days", end="4 days", periods=periods)
78
+ expected = timedelta_range(start="0 days", end="4 days", freq=freq)
79
+ tm.assert_index_equal(result, expected)
80
+
81
+ @pytest.mark.parametrize("msg_freq, freq", [("H", "19H12min"), ("T", "19h12T")])
82
+ def test_timedelta_range_H_T_deprecated(self, freq, msg_freq):
83
+ # GH#52536
84
+ msg = f"'{msg_freq}' is deprecated and will be removed in a future version."
85
+
86
+ result = timedelta_range(start="0 days", end="4 days", periods=6)
87
+ with tm.assert_produces_warning(FutureWarning, match=msg):
88
+ expected = timedelta_range(start="0 days", end="4 days", freq=freq)
89
+ tm.assert_index_equal(result, expected)
90
+
91
+ def test_errors(self):
92
+ # not enough params
93
+ msg = (
94
+ "Of the four parameters: start, end, periods, and freq, "
95
+ "exactly three must be specified"
96
+ )
97
+ with pytest.raises(ValueError, match=msg):
98
+ timedelta_range(start="0 days")
99
+
100
+ with pytest.raises(ValueError, match=msg):
101
+ timedelta_range(end="5 days")
102
+
103
+ with pytest.raises(ValueError, match=msg):
104
+ timedelta_range(periods=2)
105
+
106
+ with pytest.raises(ValueError, match=msg):
107
+ timedelta_range()
108
+
109
+ # too many params
110
+ with pytest.raises(ValueError, match=msg):
111
+ timedelta_range(start="0 days", end="5 days", periods=10, freq="h")
112
+
113
+ @pytest.mark.parametrize(
114
+ "start, end, freq, expected_periods",
115
+ [
116
+ ("1D", "10D", "2D", (10 - 1) // 2 + 1),
117
+ ("2D", "30D", "3D", (30 - 2) // 3 + 1),
118
+ ("2s", "50s", "5s", (50 - 2) // 5 + 1),
119
+ # tests that worked before GH 33498:
120
+ ("4D", "16D", "3D", (16 - 4) // 3 + 1),
121
+ ("8D", "16D", "40s", (16 * 3600 * 24 - 8 * 3600 * 24) // 40 + 1),
122
+ ],
123
+ )
124
+ def test_timedelta_range_freq_divide_end(self, start, end, freq, expected_periods):
125
+ # GH 33498 only the cases where `(end % freq) == 0` used to fail
126
+ res = timedelta_range(start=start, end=end, freq=freq)
127
+ assert Timedelta(start) == res[0]
128
+ assert Timedelta(end) >= res[-1]
129
+ assert len(res) == expected_periods
130
+
131
+ def test_timedelta_range_infer_freq(self):
132
+ # https://github.com/pandas-dev/pandas/issues/35897
133
+ result = timedelta_range("0s", "1s", periods=31)
134
+ assert result.freq is None
135
+
136
+ @pytest.mark.parametrize(
137
+ "freq_depr, start, end, expected_values, expected_freq",
138
+ [
139
+ (
140
+ "3.5S",
141
+ "05:03:01",
142
+ "05:03:10",
143
+ ["0 days 05:03:01", "0 days 05:03:04.500000", "0 days 05:03:08"],
144
+ "3500ms",
145
+ ),
146
+ (
147
+ "2.5T",
148
+ "5 hours",
149
+ "5 hours 8 minutes",
150
+ [
151
+ "0 days 05:00:00",
152
+ "0 days 05:02:30",
153
+ "0 days 05:05:00",
154
+ "0 days 05:07:30",
155
+ ],
156
+ "150s",
157
+ ),
158
+ ],
159
+ )
160
+ def test_timedelta_range_deprecated_freq(
161
+ self, freq_depr, start, end, expected_values, expected_freq
162
+ ):
163
+ # GH#52536
164
+ msg = (
165
+ f"'{freq_depr[-1]}' is deprecated and will be removed in a future version."
166
+ )
167
+
168
+ with tm.assert_produces_warning(FutureWarning, match=msg):
169
+ result = timedelta_range(start=start, end=end, freq=freq_depr)
170
+ expected = TimedeltaIndex(
171
+ expected_values, dtype="timedelta64[ns]", freq=expected_freq
172
+ )
173
+ tm.assert_index_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_crosstab.cpython-310.pyc ADDED
Binary file (23 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_cut.cpython-310.pyc ADDED
Binary file (22.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_from_dummies.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_get_dummies.cpython-310.pyc ADDED
Binary file (20.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_melt.cpython-310.pyc ADDED
Binary file (29.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot.cpython-310.pyc ADDED
Binary file (63.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot_multilevel.cpython-310.pyc ADDED
Binary file (4.55 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_qcut.cpython-310.pyc ADDED
Binary file (8.39 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_union_categoricals.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_util.cpython-310.pyc ADDED
Binary file (3.83 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (414 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc ADDED
Binary file (16.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc ADDED
Binary file (7.82 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc ADDED
Binary file (27.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc ADDED
Binary file (7.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc ADDED
Binary file (8.99 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc ADDED
Binary file (2.47 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc ADDED
Binary file (6.13 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc ADDED
Binary file (4.44 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+
4
+ @pytest.fixture(params=[True, False])
5
+ def sort(request):
6
+ """Boolean sort keyword for concat and DataFrame.append."""
7
+ return request.param
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py ADDED
@@ -0,0 +1,389 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime as dt
2
+ from itertools import combinations
3
+
4
+ import dateutil
5
+ import numpy as np
6
+ import pytest
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ DataFrame,
11
+ Index,
12
+ Series,
13
+ Timestamp,
14
+ concat,
15
+ isna,
16
+ )
17
+ import pandas._testing as tm
18
+
19
+
20
+ class TestAppend:
21
+ def test_append(self, sort, float_frame):
22
+ mixed_frame = float_frame.copy()
23
+ mixed_frame["foo"] = "bar"
24
+
25
+ begin_index = float_frame.index[:5]
26
+ end_index = float_frame.index[5:]
27
+
28
+ begin_frame = float_frame.reindex(begin_index)
29
+ end_frame = float_frame.reindex(end_index)
30
+
31
+ appended = begin_frame._append(end_frame)
32
+ tm.assert_almost_equal(appended["A"], float_frame["A"])
33
+
34
+ del end_frame["A"]
35
+ partial_appended = begin_frame._append(end_frame, sort=sort)
36
+ assert "A" in partial_appended
37
+
38
+ partial_appended = end_frame._append(begin_frame, sort=sort)
39
+ assert "A" in partial_appended
40
+
41
+ # mixed type handling
42
+ appended = mixed_frame[:5]._append(mixed_frame[5:])
43
+ tm.assert_frame_equal(appended, mixed_frame)
44
+
45
+ # what to test here
46
+ mixed_appended = mixed_frame[:5]._append(float_frame[5:], sort=sort)
47
+ mixed_appended2 = float_frame[:5]._append(mixed_frame[5:], sort=sort)
48
+
49
+ # all equal except 'foo' column
50
+ tm.assert_frame_equal(
51
+ mixed_appended.reindex(columns=["A", "B", "C", "D"]),
52
+ mixed_appended2.reindex(columns=["A", "B", "C", "D"]),
53
+ )
54
+
55
+ def test_append_empty(self, float_frame):
56
+ empty = DataFrame()
57
+
58
+ appended = float_frame._append(empty)
59
+ tm.assert_frame_equal(float_frame, appended)
60
+ assert appended is not float_frame
61
+
62
+ appended = empty._append(float_frame)
63
+ tm.assert_frame_equal(float_frame, appended)
64
+ assert appended is not float_frame
65
+
66
+ def test_append_overlap_raises(self, float_frame):
67
+ msg = "Indexes have overlapping values"
68
+ with pytest.raises(ValueError, match=msg):
69
+ float_frame._append(float_frame, verify_integrity=True)
70
+
71
+ def test_append_new_columns(self):
72
+ # see gh-6129: new columns
73
+ df = DataFrame({"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}})
74
+ row = Series([5, 6, 7], index=["a", "b", "c"], name="z")
75
+ expected = DataFrame(
76
+ {
77
+ "a": {"x": 1, "y": 2, "z": 5},
78
+ "b": {"x": 3, "y": 4, "z": 6},
79
+ "c": {"z": 7},
80
+ }
81
+ )
82
+ result = df._append(row)
83
+ tm.assert_frame_equal(result, expected)
84
+
85
+ def test_append_length0_frame(self, sort):
86
+ df = DataFrame(columns=["A", "B", "C"])
87
+ df3 = DataFrame(index=[0, 1], columns=["A", "B"])
88
+ df5 = df._append(df3, sort=sort)
89
+
90
+ expected = DataFrame(index=[0, 1], columns=["A", "B", "C"])
91
+ tm.assert_frame_equal(df5, expected)
92
+
93
+ def test_append_records(self):
94
+ arr1 = np.zeros((2,), dtype=("i4,f4,S10"))
95
+ arr1[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
96
+
97
+ arr2 = np.zeros((3,), dtype=("i4,f4,S10"))
98
+ arr2[:] = [(3, 4.0, "foo"), (5, 6.0, "bar"), (7.0, 8.0, "baz")]
99
+
100
+ df1 = DataFrame(arr1)
101
+ df2 = DataFrame(arr2)
102
+
103
+ result = df1._append(df2, ignore_index=True)
104
+ expected = DataFrame(np.concatenate((arr1, arr2)))
105
+ tm.assert_frame_equal(result, expected)
106
+
107
+ # rewrite sort fixture, since we also want to test default of None
108
+ def test_append_sorts(self, sort):
109
+ df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
110
+ df2 = DataFrame({"a": [1, 2], "c": [3, 4]}, index=[2, 3])
111
+
112
+ result = df1._append(df2, sort=sort)
113
+
114
+ # for None / True
115
+ expected = DataFrame(
116
+ {"b": [1, 2, None, None], "a": [1, 2, 1, 2], "c": [None, None, 3, 4]},
117
+ columns=["a", "b", "c"],
118
+ )
119
+ if sort is False:
120
+ expected = expected[["b", "a", "c"]]
121
+ tm.assert_frame_equal(result, expected)
122
+
123
+ def test_append_different_columns(self, sort):
124
+ df = DataFrame(
125
+ {
126
+ "bools": np.random.default_rng(2).standard_normal(10) > 0,
127
+ "ints": np.random.default_rng(2).integers(0, 10, 10),
128
+ "floats": np.random.default_rng(2).standard_normal(10),
129
+ "strings": ["foo", "bar"] * 5,
130
+ }
131
+ )
132
+
133
+ a = df[:5].loc[:, ["bools", "ints", "floats"]]
134
+ b = df[5:].loc[:, ["strings", "ints", "floats"]]
135
+
136
+ appended = a._append(b, sort=sort)
137
+ assert isna(appended["strings"][0:4]).all()
138
+ assert isna(appended["bools"][5:]).all()
139
+
140
+ def test_append_many(self, sort, float_frame):
141
+ chunks = [
142
+ float_frame[:5],
143
+ float_frame[5:10],
144
+ float_frame[10:15],
145
+ float_frame[15:],
146
+ ]
147
+
148
+ result = chunks[0]._append(chunks[1:])
149
+ tm.assert_frame_equal(result, float_frame)
150
+
151
+ chunks[-1] = chunks[-1].copy()
152
+ chunks[-1]["foo"] = "bar"
153
+ result = chunks[0]._append(chunks[1:], sort=sort)
154
+ tm.assert_frame_equal(result.loc[:, float_frame.columns], float_frame)
155
+ assert (result["foo"][15:] == "bar").all()
156
+ assert result["foo"][:15].isna().all()
157
+
158
+ def test_append_preserve_index_name(self):
159
+ # #980
160
+ df1 = DataFrame(columns=["A", "B", "C"])
161
+ df1 = df1.set_index(["A"])
162
+ df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]], columns=["A", "B", "C"])
163
+ df2 = df2.set_index(["A"])
164
+
165
+ msg = "The behavior of array concatenation with empty entries is deprecated"
166
+ with tm.assert_produces_warning(FutureWarning, match=msg):
167
+ result = df1._append(df2)
168
+ assert result.index.name == "A"
169
+
170
+ indexes_can_append = [
171
+ pd.RangeIndex(3),
172
+ Index([4, 5, 6]),
173
+ Index([4.5, 5.5, 6.5]),
174
+ Index(list("abc")),
175
+ pd.CategoricalIndex("A B C".split()),
176
+ pd.CategoricalIndex("D E F".split(), ordered=True),
177
+ pd.IntervalIndex.from_breaks([7, 8, 9, 10]),
178
+ pd.DatetimeIndex(
179
+ [
180
+ dt.datetime(2013, 1, 3, 0, 0),
181
+ dt.datetime(2013, 1, 3, 6, 10),
182
+ dt.datetime(2013, 1, 3, 7, 12),
183
+ ]
184
+ ),
185
+ pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()]),
186
+ ]
187
+
188
+ @pytest.mark.parametrize(
189
+ "index", indexes_can_append, ids=lambda x: type(x).__name__
190
+ )
191
+ def test_append_same_columns_type(self, index):
192
+ # GH18359
193
+
194
+ # df wider than ser
195
+ df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index)
196
+ ser_index = index[:2]
197
+ ser = Series([7, 8], index=ser_index, name=2)
198
+ result = df._append(ser)
199
+ expected = DataFrame(
200
+ [[1, 2, 3.0], [4, 5, 6], [7, 8, np.nan]], index=[0, 1, 2], columns=index
201
+ )
202
+ # integer dtype is preserved for columns present in ser.index
203
+ assert expected.dtypes.iloc[0].kind == "i"
204
+ assert expected.dtypes.iloc[1].kind == "i"
205
+
206
+ tm.assert_frame_equal(result, expected)
207
+
208
+ # ser wider than df
209
+ ser_index = index
210
+ index = index[:2]
211
+ df = DataFrame([[1, 2], [4, 5]], columns=index)
212
+ ser = Series([7, 8, 9], index=ser_index, name=2)
213
+ result = df._append(ser)
214
+ expected = DataFrame(
215
+ [[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]],
216
+ index=[0, 1, 2],
217
+ columns=ser_index,
218
+ )
219
+ tm.assert_frame_equal(result, expected)
220
+
221
+ @pytest.mark.parametrize(
222
+ "df_columns, series_index",
223
+ combinations(indexes_can_append, r=2),
224
+ ids=lambda x: type(x).__name__,
225
+ )
226
+ def test_append_different_columns_types(self, df_columns, series_index):
227
+ # GH18359
228
+ # See also test 'test_append_different_columns_types_raises' below
229
+ # for errors raised when appending
230
+
231
+ df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns)
232
+ ser = Series([7, 8, 9], index=series_index, name=2)
233
+
234
+ result = df._append(ser)
235
+ idx_diff = ser.index.difference(df_columns)
236
+ combined_columns = Index(df_columns.tolist()).append(idx_diff)
237
+ expected = DataFrame(
238
+ [
239
+ [1.0, 2.0, 3.0, np.nan, np.nan, np.nan],
240
+ [4, 5, 6, np.nan, np.nan, np.nan],
241
+ [np.nan, np.nan, np.nan, 7, 8, 9],
242
+ ],
243
+ index=[0, 1, 2],
244
+ columns=combined_columns,
245
+ )
246
+ tm.assert_frame_equal(result, expected)
247
+
248
+ def test_append_dtype_coerce(self, sort):
249
+ # GH 4993
250
+ # appending with datetime will incorrectly convert datetime64
251
+
252
+ df1 = DataFrame(
253
+ index=[1, 2],
254
+ data=[dt.datetime(2013, 1, 1, 0, 0), dt.datetime(2013, 1, 2, 0, 0)],
255
+ columns=["start_time"],
256
+ )
257
+ df2 = DataFrame(
258
+ index=[4, 5],
259
+ data=[
260
+ [dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 3, 6, 10)],
261
+ [dt.datetime(2013, 1, 4, 0, 0), dt.datetime(2013, 1, 4, 7, 10)],
262
+ ],
263
+ columns=["start_time", "end_time"],
264
+ )
265
+
266
+ expected = concat(
267
+ [
268
+ Series(
269
+ [
270
+ pd.NaT,
271
+ pd.NaT,
272
+ dt.datetime(2013, 1, 3, 6, 10),
273
+ dt.datetime(2013, 1, 4, 7, 10),
274
+ ],
275
+ name="end_time",
276
+ ),
277
+ Series(
278
+ [
279
+ dt.datetime(2013, 1, 1, 0, 0),
280
+ dt.datetime(2013, 1, 2, 0, 0),
281
+ dt.datetime(2013, 1, 3, 0, 0),
282
+ dt.datetime(2013, 1, 4, 0, 0),
283
+ ],
284
+ name="start_time",
285
+ ),
286
+ ],
287
+ axis=1,
288
+ sort=sort,
289
+ )
290
+ result = df1._append(df2, ignore_index=True, sort=sort)
291
+ if sort:
292
+ expected = expected[["end_time", "start_time"]]
293
+ else:
294
+ expected = expected[["start_time", "end_time"]]
295
+
296
+ tm.assert_frame_equal(result, expected)
297
+
298
+ def test_append_missing_column_proper_upcast(self, sort):
299
+ df1 = DataFrame({"A": np.array([1, 2, 3, 4], dtype="i8")})
300
+ df2 = DataFrame({"B": np.array([True, False, True, False], dtype=bool)})
301
+
302
+ appended = df1._append(df2, ignore_index=True, sort=sort)
303
+ assert appended["A"].dtype == "f8"
304
+ assert appended["B"].dtype == "O"
305
+
306
+ def test_append_empty_frame_to_series_with_dateutil_tz(self):
307
+ # GH 23682
308
+ date = Timestamp("2018-10-24 07:30:00", tz=dateutil.tz.tzutc())
309
+ ser = Series({"a": 1.0, "b": 2.0, "date": date})
310
+ df = DataFrame(columns=["c", "d"])
311
+ result_a = df._append(ser, ignore_index=True)
312
+ expected = DataFrame(
313
+ [[np.nan, np.nan, 1.0, 2.0, date]], columns=["c", "d", "a", "b", "date"]
314
+ )
315
+ # These columns get cast to object after append
316
+ expected["c"] = expected["c"].astype(object)
317
+ expected["d"] = expected["d"].astype(object)
318
+ tm.assert_frame_equal(result_a, expected)
319
+
320
+ expected = DataFrame(
321
+ [[np.nan, np.nan, 1.0, 2.0, date]] * 2, columns=["c", "d", "a", "b", "date"]
322
+ )
323
+ expected["c"] = expected["c"].astype(object)
324
+ expected["d"] = expected["d"].astype(object)
325
+ result_b = result_a._append(ser, ignore_index=True)
326
+ tm.assert_frame_equal(result_b, expected)
327
+
328
+ result = df._append([ser, ser], ignore_index=True)
329
+ tm.assert_frame_equal(result, expected)
330
+
331
+ def test_append_empty_tz_frame_with_datetime64ns(self, using_array_manager):
332
+ # https://github.com/pandas-dev/pandas/issues/35460
333
+ df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]")
334
+
335
+ # pd.NaT gets inferred as tz-naive, so append result is tz-naive
336
+ result = df._append({"a": pd.NaT}, ignore_index=True)
337
+ if using_array_manager:
338
+ expected = DataFrame({"a": [pd.NaT]}, dtype=object)
339
+ else:
340
+ expected = DataFrame({"a": [np.nan]}, dtype=object)
341
+ tm.assert_frame_equal(result, expected)
342
+
343
+ # also test with typed value to append
344
+ df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]")
345
+ other = Series({"a": pd.NaT}, dtype="datetime64[ns]")
346
+ result = df._append(other, ignore_index=True)
347
+ tm.assert_frame_equal(result, expected)
348
+
349
+ # mismatched tz
350
+ other = Series({"a": pd.NaT}, dtype="datetime64[ns, US/Pacific]")
351
+ result = df._append(other, ignore_index=True)
352
+ expected = DataFrame({"a": [pd.NaT]}).astype(object)
353
+ tm.assert_frame_equal(result, expected)
354
+
355
+ @pytest.mark.parametrize(
356
+ "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"]
357
+ )
358
+ @pytest.mark.parametrize("val", [1, "NaT"])
359
+ def test_append_empty_frame_with_timedelta64ns_nat(
360
+ self, dtype_str, val, using_array_manager
361
+ ):
362
+ # https://github.com/pandas-dev/pandas/issues/35460
363
+ df = DataFrame(columns=["a"]).astype(dtype_str)
364
+
365
+ other = DataFrame({"a": [np.timedelta64(val, "ns")]})
366
+ result = df._append(other, ignore_index=True)
367
+
368
+ expected = other.astype(object)
369
+ if isinstance(val, str) and dtype_str != "int64" and not using_array_manager:
370
+ # TODO: expected used to be `other.astype(object)` which is a more
371
+ # reasonable result. This was changed when tightening
372
+ # assert_frame_equal's treatment of mismatched NAs to match the
373
+ # existing behavior.
374
+ expected = DataFrame({"a": [np.nan]}, dtype=object)
375
+ tm.assert_frame_equal(result, expected)
376
+
377
+ @pytest.mark.parametrize(
378
+ "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"]
379
+ )
380
+ @pytest.mark.parametrize("val", [1, "NaT"])
381
+ def test_append_frame_with_timedelta64ns_nat(self, dtype_str, val):
382
+ # https://github.com/pandas-dev/pandas/issues/35460
383
+ df = DataFrame({"a": pd.array([1], dtype=dtype_str)})
384
+
385
+ other = DataFrame({"a": [np.timedelta64(val, "ns")]})
386
+ result = df._append(other, ignore_index=True)
387
+
388
+ expected = DataFrame({"a": [df.iloc[0, 0], other.iloc[0, 0]]}, dtype=object)
389
+ tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py ADDED
@@ -0,0 +1,753 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ Categorical,
7
+ DataFrame,
8
+ Index,
9
+ Series,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+
14
+ @pytest.fixture(
15
+ params=list(
16
+ {
17
+ "bool": [True, False, True],
18
+ "int64": [1, 2, 3],
19
+ "float64": [1.1, np.nan, 3.3],
20
+ "category": Categorical(["X", "Y", "Z"]),
21
+ "object": ["a", "b", "c"],
22
+ "datetime64[ns]": [
23
+ pd.Timestamp("2011-01-01"),
24
+ pd.Timestamp("2011-01-02"),
25
+ pd.Timestamp("2011-01-03"),
26
+ ],
27
+ "datetime64[ns, US/Eastern]": [
28
+ pd.Timestamp("2011-01-01", tz="US/Eastern"),
29
+ pd.Timestamp("2011-01-02", tz="US/Eastern"),
30
+ pd.Timestamp("2011-01-03", tz="US/Eastern"),
31
+ ],
32
+ "timedelta64[ns]": [
33
+ pd.Timedelta("1 days"),
34
+ pd.Timedelta("2 days"),
35
+ pd.Timedelta("3 days"),
36
+ ],
37
+ "period[M]": [
38
+ pd.Period("2011-01", freq="M"),
39
+ pd.Period("2011-02", freq="M"),
40
+ pd.Period("2011-03", freq="M"),
41
+ ],
42
+ }.items()
43
+ )
44
+ )
45
+ def item(request):
46
+ key, data = request.param
47
+ return key, data
48
+
49
+
50
+ @pytest.fixture
51
+ def item2(item):
52
+ return item
53
+
54
+
55
+ class TestConcatAppendCommon:
56
+ """
57
+ Test common dtype coercion rules between concat and append.
58
+ """
59
+
60
+ def test_dtypes(self, item, index_or_series, using_infer_string):
61
+ # to confirm test case covers intended dtypes
62
+ typ, vals = item
63
+ obj = index_or_series(vals)
64
+ if typ == "object" and using_infer_string:
65
+ typ = "string"
66
+ if isinstance(obj, Index):
67
+ assert obj.dtype == typ
68
+ elif isinstance(obj, Series):
69
+ if typ.startswith("period"):
70
+ assert obj.dtype == "Period[M]"
71
+ else:
72
+ assert obj.dtype == typ
73
+
74
+ def test_concatlike_same_dtypes(self, item):
75
+ # GH 13660
76
+ typ1, vals1 = item
77
+
78
+ vals2 = vals1
79
+ vals3 = vals1
80
+
81
+ if typ1 == "category":
82
+ exp_data = Categorical(list(vals1) + list(vals2))
83
+ exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
84
+ else:
85
+ exp_data = vals1 + vals2
86
+ exp_data3 = vals1 + vals2 + vals3
87
+
88
+ # ----- Index ----- #
89
+
90
+ # index.append
91
+ res = Index(vals1).append(Index(vals2))
92
+ exp = Index(exp_data)
93
+ tm.assert_index_equal(res, exp)
94
+
95
+ # 3 elements
96
+ res = Index(vals1).append([Index(vals2), Index(vals3)])
97
+ exp = Index(exp_data3)
98
+ tm.assert_index_equal(res, exp)
99
+
100
+ # index.append name mismatch
101
+ i1 = Index(vals1, name="x")
102
+ i2 = Index(vals2, name="y")
103
+ res = i1.append(i2)
104
+ exp = Index(exp_data)
105
+ tm.assert_index_equal(res, exp)
106
+
107
+ # index.append name match
108
+ i1 = Index(vals1, name="x")
109
+ i2 = Index(vals2, name="x")
110
+ res = i1.append(i2)
111
+ exp = Index(exp_data, name="x")
112
+ tm.assert_index_equal(res, exp)
113
+
114
+ # cannot append non-index
115
+ with pytest.raises(TypeError, match="all inputs must be Index"):
116
+ Index(vals1).append(vals2)
117
+
118
+ with pytest.raises(TypeError, match="all inputs must be Index"):
119
+ Index(vals1).append([Index(vals2), vals3])
120
+
121
+ # ----- Series ----- #
122
+
123
+ # series.append
124
+ res = Series(vals1)._append(Series(vals2), ignore_index=True)
125
+ exp = Series(exp_data)
126
+ tm.assert_series_equal(res, exp, check_index_type=True)
127
+
128
+ # concat
129
+ res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
130
+ tm.assert_series_equal(res, exp, check_index_type=True)
131
+
132
+ # 3 elements
133
+ res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
134
+ exp = Series(exp_data3)
135
+ tm.assert_series_equal(res, exp)
136
+
137
+ res = pd.concat(
138
+ [Series(vals1), Series(vals2), Series(vals3)],
139
+ ignore_index=True,
140
+ )
141
+ tm.assert_series_equal(res, exp)
142
+
143
+ # name mismatch
144
+ s1 = Series(vals1, name="x")
145
+ s2 = Series(vals2, name="y")
146
+ res = s1._append(s2, ignore_index=True)
147
+ exp = Series(exp_data)
148
+ tm.assert_series_equal(res, exp, check_index_type=True)
149
+
150
+ res = pd.concat([s1, s2], ignore_index=True)
151
+ tm.assert_series_equal(res, exp, check_index_type=True)
152
+
153
+ # name match
154
+ s1 = Series(vals1, name="x")
155
+ s2 = Series(vals2, name="x")
156
+ res = s1._append(s2, ignore_index=True)
157
+ exp = Series(exp_data, name="x")
158
+ tm.assert_series_equal(res, exp, check_index_type=True)
159
+
160
+ res = pd.concat([s1, s2], ignore_index=True)
161
+ tm.assert_series_equal(res, exp, check_index_type=True)
162
+
163
+ # cannot append non-index
164
+ msg = (
165
+ r"cannot concatenate object of type '.+'; "
166
+ "only Series and DataFrame objs are valid"
167
+ )
168
+ with pytest.raises(TypeError, match=msg):
169
+ Series(vals1)._append(vals2)
170
+
171
+ with pytest.raises(TypeError, match=msg):
172
+ Series(vals1)._append([Series(vals2), vals3])
173
+
174
+ with pytest.raises(TypeError, match=msg):
175
+ pd.concat([Series(vals1), vals2])
176
+
177
+ with pytest.raises(TypeError, match=msg):
178
+ pd.concat([Series(vals1), Series(vals2), vals3])
179
+
180
+ def test_concatlike_dtypes_coercion(self, item, item2, request):
181
+ # GH 13660
182
+ typ1, vals1 = item
183
+ typ2, vals2 = item2
184
+
185
+ vals3 = vals2
186
+
187
+ # basically infer
188
+ exp_index_dtype = None
189
+ exp_series_dtype = None
190
+
191
+ if typ1 == typ2:
192
+ pytest.skip("same dtype is tested in test_concatlike_same_dtypes")
193
+ elif typ1 == "category" or typ2 == "category":
194
+ pytest.skip("categorical type tested elsewhere")
195
+
196
+ # specify expected dtype
197
+ if typ1 == "bool" and typ2 in ("int64", "float64"):
198
+ # series coerces to numeric based on numpy rule
199
+ # index doesn't because bool is object dtype
200
+ exp_series_dtype = typ2
201
+ mark = pytest.mark.xfail(reason="GH#39187 casting to object")
202
+ request.applymarker(mark)
203
+ elif typ2 == "bool" and typ1 in ("int64", "float64"):
204
+ exp_series_dtype = typ1
205
+ mark = pytest.mark.xfail(reason="GH#39187 casting to object")
206
+ request.applymarker(mark)
207
+ elif typ1 in {"datetime64[ns, US/Eastern]", "timedelta64[ns]"} or typ2 in {
208
+ "datetime64[ns, US/Eastern]",
209
+ "timedelta64[ns]",
210
+ }:
211
+ exp_index_dtype = object
212
+ exp_series_dtype = object
213
+
214
+ exp_data = vals1 + vals2
215
+ exp_data3 = vals1 + vals2 + vals3
216
+
217
+ # ----- Index ----- #
218
+
219
+ # index.append
220
+ # GH#39817
221
+ res = Index(vals1).append(Index(vals2))
222
+ exp = Index(exp_data, dtype=exp_index_dtype)
223
+ tm.assert_index_equal(res, exp)
224
+
225
+ # 3 elements
226
+ res = Index(vals1).append([Index(vals2), Index(vals3)])
227
+ exp = Index(exp_data3, dtype=exp_index_dtype)
228
+ tm.assert_index_equal(res, exp)
229
+
230
+ # ----- Series ----- #
231
+
232
+ # series._append
233
+ # GH#39817
234
+ res = Series(vals1)._append(Series(vals2), ignore_index=True)
235
+ exp = Series(exp_data, dtype=exp_series_dtype)
236
+ tm.assert_series_equal(res, exp, check_index_type=True)
237
+
238
+ # concat
239
+ # GH#39817
240
+ res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
241
+ tm.assert_series_equal(res, exp, check_index_type=True)
242
+
243
+ # 3 elements
244
+ # GH#39817
245
+ res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
246
+ exp = Series(exp_data3, dtype=exp_series_dtype)
247
+ tm.assert_series_equal(res, exp)
248
+
249
+ # GH#39817
250
+ res = pd.concat(
251
+ [Series(vals1), Series(vals2), Series(vals3)],
252
+ ignore_index=True,
253
+ )
254
+ tm.assert_series_equal(res, exp)
255
+
256
+ def test_concatlike_common_coerce_to_pandas_object(self):
257
+ # GH 13626
258
+ # result must be Timestamp/Timedelta, not datetime.datetime/timedelta
259
+ dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
260
+ tdi = pd.TimedeltaIndex(["1 days", "2 days"])
261
+
262
+ exp = Index(
263
+ [
264
+ pd.Timestamp("2011-01-01"),
265
+ pd.Timestamp("2011-01-02"),
266
+ pd.Timedelta("1 days"),
267
+ pd.Timedelta("2 days"),
268
+ ]
269
+ )
270
+
271
+ res = dti.append(tdi)
272
+ tm.assert_index_equal(res, exp)
273
+ assert isinstance(res[0], pd.Timestamp)
274
+ assert isinstance(res[-1], pd.Timedelta)
275
+
276
+ dts = Series(dti)
277
+ tds = Series(tdi)
278
+ res = dts._append(tds)
279
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
280
+ assert isinstance(res.iloc[0], pd.Timestamp)
281
+ assert isinstance(res.iloc[-1], pd.Timedelta)
282
+
283
+ res = pd.concat([dts, tds])
284
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
285
+ assert isinstance(res.iloc[0], pd.Timestamp)
286
+ assert isinstance(res.iloc[-1], pd.Timedelta)
287
+
288
+ def test_concatlike_datetimetz(self, tz_aware_fixture):
289
+ tz = tz_aware_fixture
290
+ # GH 7795
291
+ dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
292
+ dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
293
+
294
+ exp = pd.DatetimeIndex(
295
+ ["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
296
+ )
297
+
298
+ res = dti1.append(dti2)
299
+ tm.assert_index_equal(res, exp)
300
+
301
+ dts1 = Series(dti1)
302
+ dts2 = Series(dti2)
303
+ res = dts1._append(dts2)
304
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
305
+
306
+ res = pd.concat([dts1, dts2])
307
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
308
+
309
+ @pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
310
+ def test_concatlike_datetimetz_short(self, tz):
311
+ # GH#7795
312
+ ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
313
+ ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
314
+ df1 = DataFrame(0, index=ix1, columns=["A", "B"])
315
+ df2 = DataFrame(0, index=ix2, columns=["A", "B"])
316
+
317
+ exp_idx = pd.DatetimeIndex(
318
+ ["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
319
+ tz=tz,
320
+ ).as_unit("ns")
321
+ exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
322
+
323
+ tm.assert_frame_equal(df1._append(df2), exp)
324
+ tm.assert_frame_equal(pd.concat([df1, df2]), exp)
325
+
326
+ def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
327
+ tz = tz_aware_fixture
328
+ # GH 13660
329
+
330
+ # different tz coerces to object
331
+ dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
332
+ dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
333
+
334
+ exp = Index(
335
+ [
336
+ pd.Timestamp("2011-01-01", tz=tz),
337
+ pd.Timestamp("2011-01-02", tz=tz),
338
+ pd.Timestamp("2012-01-01"),
339
+ pd.Timestamp("2012-01-02"),
340
+ ],
341
+ dtype=object,
342
+ )
343
+
344
+ res = dti1.append(dti2)
345
+ tm.assert_index_equal(res, exp)
346
+
347
+ dts1 = Series(dti1)
348
+ dts2 = Series(dti2)
349
+ res = dts1._append(dts2)
350
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
351
+
352
+ res = pd.concat([dts1, dts2])
353
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
354
+
355
+ # different tz
356
+ dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
357
+
358
+ exp = Index(
359
+ [
360
+ pd.Timestamp("2011-01-01", tz=tz),
361
+ pd.Timestamp("2011-01-02", tz=tz),
362
+ pd.Timestamp("2012-01-01", tz="US/Pacific"),
363
+ pd.Timestamp("2012-01-02", tz="US/Pacific"),
364
+ ],
365
+ dtype=object,
366
+ )
367
+
368
+ res = dti1.append(dti3)
369
+ tm.assert_index_equal(res, exp)
370
+
371
+ dts1 = Series(dti1)
372
+ dts3 = Series(dti3)
373
+ res = dts1._append(dts3)
374
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
375
+
376
+ res = pd.concat([dts1, dts3])
377
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
378
+
379
+ def test_concatlike_common_period(self):
380
+ # GH 13660
381
+ pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
382
+ pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
383
+
384
+ exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
385
+
386
+ res = pi1.append(pi2)
387
+ tm.assert_index_equal(res, exp)
388
+
389
+ ps1 = Series(pi1)
390
+ ps2 = Series(pi2)
391
+ res = ps1._append(ps2)
392
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
393
+
394
+ res = pd.concat([ps1, ps2])
395
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
396
+
397
+ def test_concatlike_common_period_diff_freq_to_object(self):
398
+ # GH 13221
399
+ pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
400
+ pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
401
+
402
+ exp = Index(
403
+ [
404
+ pd.Period("2011-01", freq="M"),
405
+ pd.Period("2011-02", freq="M"),
406
+ pd.Period("2012-01-01", freq="D"),
407
+ pd.Period("2012-02-01", freq="D"),
408
+ ],
409
+ dtype=object,
410
+ )
411
+
412
+ res = pi1.append(pi2)
413
+ tm.assert_index_equal(res, exp)
414
+
415
+ ps1 = Series(pi1)
416
+ ps2 = Series(pi2)
417
+ res = ps1._append(ps2)
418
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
419
+
420
+ res = pd.concat([ps1, ps2])
421
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
422
+
423
+ def test_concatlike_common_period_mixed_dt_to_object(self):
424
+ # GH 13221
425
+ # different datetimelike
426
+ pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
427
+ tdi = pd.TimedeltaIndex(["1 days", "2 days"])
428
+ exp = Index(
429
+ [
430
+ pd.Period("2011-01", freq="M"),
431
+ pd.Period("2011-02", freq="M"),
432
+ pd.Timedelta("1 days"),
433
+ pd.Timedelta("2 days"),
434
+ ],
435
+ dtype=object,
436
+ )
437
+
438
+ res = pi1.append(tdi)
439
+ tm.assert_index_equal(res, exp)
440
+
441
+ ps1 = Series(pi1)
442
+ tds = Series(tdi)
443
+ res = ps1._append(tds)
444
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
445
+
446
+ res = pd.concat([ps1, tds])
447
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
448
+
449
+ # inverse
450
+ exp = Index(
451
+ [
452
+ pd.Timedelta("1 days"),
453
+ pd.Timedelta("2 days"),
454
+ pd.Period("2011-01", freq="M"),
455
+ pd.Period("2011-02", freq="M"),
456
+ ],
457
+ dtype=object,
458
+ )
459
+
460
+ res = tdi.append(pi1)
461
+ tm.assert_index_equal(res, exp)
462
+
463
+ ps1 = Series(pi1)
464
+ tds = Series(tdi)
465
+ res = tds._append(ps1)
466
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
467
+
468
+ res = pd.concat([tds, ps1])
469
+ tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
470
+
471
+ def test_concat_categorical(self):
472
+ # GH 13524
473
+
474
+ # same categories -> category
475
+ s1 = Series([1, 2, np.nan], dtype="category")
476
+ s2 = Series([2, 1, 2], dtype="category")
477
+
478
+ exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
479
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
480
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
481
+
482
+ # partially different categories => not-category
483
+ s1 = Series([3, 2], dtype="category")
484
+ s2 = Series([2, 1], dtype="category")
485
+
486
+ exp = Series([3, 2, 2, 1])
487
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
488
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
489
+
490
+ # completely different categories (same dtype) => not-category
491
+ s1 = Series([10, 11, np.nan], dtype="category")
492
+ s2 = Series([np.nan, 1, 3, 2], dtype="category")
493
+
494
+ exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
495
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
496
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
497
+
498
+ def test_union_categorical_same_categories_different_order(self):
499
+ # https://github.com/pandas-dev/pandas/issues/19096
500
+ a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
501
+ b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
502
+ result = pd.concat([a, b], ignore_index=True)
503
+ expected = Series(
504
+ Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
505
+ )
506
+ tm.assert_series_equal(result, expected)
507
+
508
+ def test_concat_categorical_coercion(self):
509
+ # GH 13524
510
+
511
+ # category + not-category => not-category
512
+ s1 = Series([1, 2, np.nan], dtype="category")
513
+ s2 = Series([2, 1, 2])
514
+
515
+ exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64)
516
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
517
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
518
+
519
+ # result shouldn't be affected by 1st elem dtype
520
+ exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64)
521
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
522
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
523
+
524
+ # all values are not in category => not-category
525
+ s1 = Series([3, 2], dtype="category")
526
+ s2 = Series([2, 1])
527
+
528
+ exp = Series([3, 2, 2, 1])
529
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
530
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
531
+
532
+ exp = Series([2, 1, 3, 2])
533
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
534
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
535
+
536
+ # completely different categories => not-category
537
+ s1 = Series([10, 11, np.nan], dtype="category")
538
+ s2 = Series([1, 3, 2])
539
+
540
+ exp = Series([10, 11, np.nan, 1, 3, 2], dtype=np.float64)
541
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
542
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
543
+
544
+ exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64)
545
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
546
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
547
+
548
+ # different dtype => not-category
549
+ s1 = Series([10, 11, np.nan], dtype="category")
550
+ s2 = Series(["a", "b", "c"])
551
+
552
+ exp = Series([10, 11, np.nan, "a", "b", "c"])
553
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
554
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
555
+
556
+ exp = Series(["a", "b", "c", 10, 11, np.nan])
557
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
558
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
559
+
560
+ # if normal series only contains NaN-likes => not-category
561
+ s1 = Series([10, 11], dtype="category")
562
+ s2 = Series([np.nan, np.nan, np.nan])
563
+
564
+ exp = Series([10, 11, np.nan, np.nan, np.nan])
565
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
566
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
567
+
568
+ exp = Series([np.nan, np.nan, np.nan, 10, 11])
569
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
570
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
571
+
572
+ def test_concat_categorical_3elem_coercion(self):
573
+ # GH 13524
574
+
575
+ # mixed dtypes => not-category
576
+ s1 = Series([1, 2, np.nan], dtype="category")
577
+ s2 = Series([2, 1, 2], dtype="category")
578
+ s3 = Series([1, 2, 1, 2, np.nan])
579
+
580
+ exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
581
+ tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
582
+ tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
583
+
584
+ exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
585
+ tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
586
+ tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
587
+
588
+ # values are all in either category => not-category
589
+ s1 = Series([4, 5, 6], dtype="category")
590
+ s2 = Series([1, 2, 3], dtype="category")
591
+ s3 = Series([1, 3, 4])
592
+
593
+ exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
594
+ tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
595
+ tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
596
+
597
+ exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
598
+ tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
599
+ tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
600
+
601
+ # values are all in either category => not-category
602
+ s1 = Series([4, 5, 6], dtype="category")
603
+ s2 = Series([1, 2, 3], dtype="category")
604
+ s3 = Series([10, 11, 12])
605
+
606
+ exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
607
+ tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
608
+ tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
609
+
610
+ exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
611
+ tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
612
+ tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
613
+
614
+ def test_concat_categorical_multi_coercion(self):
615
+ # GH 13524
616
+
617
+ s1 = Series([1, 3], dtype="category")
618
+ s2 = Series([3, 4], dtype="category")
619
+ s3 = Series([2, 3])
620
+ s4 = Series([2, 2], dtype="category")
621
+ s5 = Series([1, np.nan])
622
+ s6 = Series([1, 3, 2], dtype="category")
623
+
624
+ # mixed dtype, values are all in categories => not-category
625
+ exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 3, 2])
626
+ res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True)
627
+ tm.assert_series_equal(res, exp)
628
+ res = s1._append([s2, s3, s4, s5, s6], ignore_index=True)
629
+ tm.assert_series_equal(res, exp)
630
+
631
+ exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3])
632
+ res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True)
633
+ tm.assert_series_equal(res, exp)
634
+ res = s6._append([s5, s4, s3, s2, s1], ignore_index=True)
635
+ tm.assert_series_equal(res, exp)
636
+
637
+ def test_concat_categorical_ordered(self):
638
+ # GH 13524
639
+
640
+ s1 = Series(Categorical([1, 2, np.nan], ordered=True))
641
+ s2 = Series(Categorical([2, 1, 2], ordered=True))
642
+
643
+ exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True))
644
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
645
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
646
+
647
+ exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True))
648
+ tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp)
649
+ tm.assert_series_equal(s1._append([s2, s1], ignore_index=True), exp)
650
+
651
+ def test_concat_categorical_coercion_nan(self):
652
+ # GH 13524
653
+
654
+ # some edge cases
655
+ # category + not-category => not category
656
+ s1 = Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category")
657
+ s2 = Series([np.nan, 1])
658
+
659
+ exp = Series([np.nan, np.nan, np.nan, 1])
660
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
661
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
662
+
663
+ s1 = Series([1, np.nan], dtype="category")
664
+ s2 = Series([np.nan, np.nan])
665
+
666
+ exp = Series([1, np.nan, np.nan, np.nan], dtype="float")
667
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
668
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
669
+
670
+ # mixed dtype, all nan-likes => not-category
671
+ s1 = Series([np.nan, np.nan], dtype="category")
672
+ s2 = Series([np.nan, np.nan])
673
+
674
+ exp = Series([np.nan, np.nan, np.nan, np.nan])
675
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
676
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
677
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
678
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
679
+
680
+ # all category nan-likes => category
681
+ s1 = Series([np.nan, np.nan], dtype="category")
682
+ s2 = Series([np.nan, np.nan], dtype="category")
683
+
684
+ exp = Series([np.nan, np.nan, np.nan, np.nan], dtype="category")
685
+
686
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
687
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
688
+
689
+ def test_concat_categorical_empty(self):
690
+ # GH 13524
691
+
692
+ s1 = Series([], dtype="category")
693
+ s2 = Series([1, 2], dtype="category")
694
+
695
+ msg = "The behavior of array concatenation with empty entries is deprecated"
696
+ with tm.assert_produces_warning(FutureWarning, match=msg):
697
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
698
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), s2)
699
+
700
+ with tm.assert_produces_warning(FutureWarning, match=msg):
701
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
702
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), s2)
703
+
704
+ s1 = Series([], dtype="category")
705
+ s2 = Series([], dtype="category")
706
+
707
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
708
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), s2)
709
+
710
+ s1 = Series([], dtype="category")
711
+ s2 = Series([], dtype="object")
712
+
713
+ # different dtype => not-category
714
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2)
715
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), s2)
716
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2)
717
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), s2)
718
+
719
+ s1 = Series([], dtype="category")
720
+ s2 = Series([np.nan, np.nan])
721
+
722
+ # empty Series is ignored
723
+ exp = Series([np.nan, np.nan])
724
+ with tm.assert_produces_warning(FutureWarning, match=msg):
725
+ tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
726
+ tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
727
+
728
+ with tm.assert_produces_warning(FutureWarning, match=msg):
729
+ tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
730
+ tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
731
+
732
+ def test_categorical_concat_append(self):
733
+ cat = Categorical(["a", "b"], categories=["a", "b"])
734
+ vals = [1, 2]
735
+ df = DataFrame({"cats": cat, "vals": vals})
736
+ cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
737
+ vals2 = [1, 2, 1, 2]
738
+ exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
739
+
740
+ tm.assert_frame_equal(pd.concat([df, df]), exp)
741
+ tm.assert_frame_equal(df._append(df), exp)
742
+
743
+ # GH 13524 can concat different categories
744
+ cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
745
+ vals3 = [1, 2]
746
+ df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
747
+
748
+ res = pd.concat([df, df_different_categories], ignore_index=True)
749
+ exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
750
+ tm.assert_frame_equal(res, exp)
751
+
752
+ res = df._append(df_different_categories, ignore_index=True)
753
+ tm.assert_frame_equal(res, exp)
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+
3
+ import numpy as np
4
+
5
+ from pandas.core.dtypes.dtypes import CategoricalDtype
6
+
7
+ import pandas as pd
8
+ from pandas import (
9
+ Categorical,
10
+ DataFrame,
11
+ Series,
12
+ )
13
+ import pandas._testing as tm
14
+
15
+
16
+ class TestCategoricalConcat:
17
+ def test_categorical_concat(self, sort):
18
+ # See GH 10177
19
+ df1 = DataFrame(
20
+ np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
21
+ )
22
+
23
+ df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
24
+
25
+ cat_values = ["one", "one", "two", "one", "two", "two", "one"]
26
+ df2["h"] = Series(Categorical(cat_values))
27
+
28
+ res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
29
+ exp = DataFrame(
30
+ {
31
+ "a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
32
+ "b": [
33
+ 1,
34
+ 4,
35
+ 7,
36
+ 10,
37
+ 13,
38
+ 16,
39
+ np.nan,
40
+ np.nan,
41
+ np.nan,
42
+ np.nan,
43
+ np.nan,
44
+ np.nan,
45
+ np.nan,
46
+ ],
47
+ "c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
48
+ "h": [None] * 6 + cat_values,
49
+ }
50
+ )
51
+ exp["h"] = exp["h"].astype(df2["h"].dtype)
52
+ tm.assert_frame_equal(res, exp)
53
+
54
+ def test_categorical_concat_dtypes(self, using_infer_string):
55
+ # GH8143
56
+ index = ["cat", "obj", "num"]
57
+ cat = Categorical(["a", "b", "c"])
58
+ obj = Series(["a", "b", "c"])
59
+ num = Series([1, 2, 3])
60
+ df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
61
+
62
+ result = df.dtypes == (
63
+ object if not using_infer_string else "string[pyarrow_numpy]"
64
+ )
65
+ expected = Series([False, True, False], index=index)
66
+ tm.assert_series_equal(result, expected)
67
+
68
+ result = df.dtypes == "int64"
69
+ expected = Series([False, False, True], index=index)
70
+ tm.assert_series_equal(result, expected)
71
+
72
+ result = df.dtypes == "category"
73
+ expected = Series([True, False, False], index=index)
74
+ tm.assert_series_equal(result, expected)
75
+
76
+ def test_concat_categoricalindex(self):
77
+ # GH 16111, categories that aren't lexsorted
78
+ categories = [9, 0, 1, 2, 3]
79
+
80
+ a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
81
+ b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
82
+ c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
83
+
84
+ result = pd.concat([a, b, c], axis=1)
85
+
86
+ exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
87
+ exp = DataFrame(
88
+ {
89
+ 0: [1, 1, np.nan, np.nan],
90
+ 1: [np.nan, 2, 2, np.nan],
91
+ 2: [np.nan, np.nan, 3, 3],
92
+ },
93
+ columns=[0, 1, 2],
94
+ index=exp_idx,
95
+ )
96
+ tm.assert_frame_equal(result, exp)
97
+
98
+ def test_categorical_concat_preserve(self):
99
+ # GH 8641 series concat not preserving category dtype
100
+ # GH 13524 can concat different categories
101
+ s = Series(list("abc"), dtype="category")
102
+ s2 = Series(list("abd"), dtype="category")
103
+
104
+ exp = Series(list("abcabd"))
105
+ res = pd.concat([s, s2], ignore_index=True)
106
+ tm.assert_series_equal(res, exp)
107
+
108
+ exp = Series(list("abcabc"), dtype="category")
109
+ res = pd.concat([s, s], ignore_index=True)
110
+ tm.assert_series_equal(res, exp)
111
+
112
+ exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
113
+ res = pd.concat([s, s])
114
+ tm.assert_series_equal(res, exp)
115
+
116
+ a = Series(np.arange(6, dtype="int64"))
117
+ b = Series(list("aabbca"))
118
+
119
+ df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
120
+ res = pd.concat([df2, df2])
121
+ exp = DataFrame(
122
+ {
123
+ "A": pd.concat([a, a]),
124
+ "B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
125
+ }
126
+ )
127
+ tm.assert_frame_equal(res, exp)
128
+
129
+ def test_categorical_index_preserver(self):
130
+ a = Series(np.arange(6, dtype="int64"))
131
+ b = Series(list("aabbca"))
132
+
133
+ df2 = DataFrame(
134
+ {"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
135
+ ).set_index("B")
136
+ result = pd.concat([df2, df2])
137
+ expected = DataFrame(
138
+ {
139
+ "A": pd.concat([a, a]),
140
+ "B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
141
+ }
142
+ ).set_index("B")
143
+ tm.assert_frame_equal(result, expected)
144
+
145
+ # wrong categories -> uses concat_compat, which casts to object
146
+ df3 = DataFrame(
147
+ {"A": a, "B": Categorical(b, categories=list("abe"))}
148
+ ).set_index("B")
149
+ result = pd.concat([df2, df3])
150
+ expected = pd.concat(
151
+ [
152
+ df2.set_axis(df2.index.astype(object), axis=0),
153
+ df3.set_axis(df3.index.astype(object), axis=0),
154
+ ]
155
+ )
156
+ tm.assert_frame_equal(result, expected)
157
+
158
+ def test_concat_categorical_tz(self):
159
+ # GH-23816
160
+ a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
161
+ b = Series(["a", "b"], dtype="category")
162
+ result = pd.concat([a, b], ignore_index=True)
163
+ expected = Series(
164
+ [
165
+ pd.Timestamp("2017-01-01", tz="US/Pacific"),
166
+ pd.Timestamp("2017-01-02", tz="US/Pacific"),
167
+ "a",
168
+ "b",
169
+ ]
170
+ )
171
+ tm.assert_series_equal(result, expected)
172
+
173
+ def test_concat_categorical_datetime(self):
174
+ # GH-39443
175
+ df1 = DataFrame(
176
+ {"x": Series(datetime(2021, 1, 1), index=[0], dtype="category")}
177
+ )
178
+ df2 = DataFrame(
179
+ {"x": Series(datetime(2021, 1, 2), index=[1], dtype="category")}
180
+ )
181
+
182
+ result = pd.concat([df1, df2])
183
+ expected = DataFrame(
184
+ {"x": Series([datetime(2021, 1, 1), datetime(2021, 1, 2)])}
185
+ )
186
+
187
+ tm.assert_equal(result, expected)
188
+
189
+ def test_concat_categorical_unchanged(self):
190
+ # GH-12007
191
+ # test fix for when concat on categorical and float
192
+ # coerces dtype categorical -> float
193
+ df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A"))
194
+ ser = Series([0, 1, 2], index=[0, 1, 3], name="B")
195
+ result = pd.concat([df, ser], axis=1)
196
+ expected = DataFrame(
197
+ {
198
+ "A": Series(["a", "b", "c", np.nan], dtype="category"),
199
+ "B": Series([0, 1, np.nan, 2], dtype="float"),
200
+ }
201
+ )
202
+ tm.assert_equal(result, expected)
203
+
204
+ def test_categorical_concat_gh7864(self):
205
+ # GH 7864
206
+ # make sure ordering is preserved
207
+ df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
208
+ df["grade"] = Categorical(df["raw_grade"])
209
+ df["grade"].cat.set_categories(["e", "a", "b"])
210
+
211
+ df1 = df[0:3]
212
+ df2 = df[3:]
213
+
214
+ tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
215
+ tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
216
+
217
+ dfx = pd.concat([df1, df2])
218
+ tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
219
+
220
+ dfa = df1._append(df2)
221
+ tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
222
+
223
+ def test_categorical_index_upcast(self):
224
+ # GH 17629
225
+ # test upcasting to object when concatenating on categorical indexes
226
+ # with non-identical categories
227
+
228
+ a = DataFrame({"foo": [1, 2]}, index=Categorical(["foo", "bar"]))
229
+ b = DataFrame({"foo": [4, 3]}, index=Categorical(["baz", "bar"]))
230
+
231
+ res = pd.concat([a, b])
232
+ exp = DataFrame({"foo": [1, 2, 4, 3]}, index=["foo", "bar", "baz", "bar"])
233
+
234
+ tm.assert_equal(res, exp)
235
+
236
+ a = Series([1, 2], index=Categorical(["foo", "bar"]))
237
+ b = Series([4, 3], index=Categorical(["baz", "bar"]))
238
+
239
+ res = pd.concat([a, b])
240
+ exp = Series([1, 2, 4, 3], index=["foo", "bar", "baz", "bar"])
241
+
242
+ tm.assert_equal(res, exp)
243
+
244
+ def test_categorical_missing_from_one_frame(self):
245
+ # GH 25412
246
+ df1 = DataFrame({"f1": [1, 2, 3]})
247
+ df2 = DataFrame({"f1": [2, 3, 1], "f2": Series([4, 4, 4]).astype("category")})
248
+ result = pd.concat([df1, df2], sort=True)
249
+ dtype = CategoricalDtype([4])
250
+ expected = DataFrame(
251
+ {
252
+ "f1": [1, 2, 3, 2, 3, 1],
253
+ "f2": Categorical.from_codes([-1, -1, -1, 0, 0, 0], dtype=dtype),
254
+ },
255
+ index=[0, 1, 2, 0, 1, 2],
256
+ )
257
+ tm.assert_frame_equal(result, expected)
258
+
259
+ def test_concat_categorical_same_categories_different_order(self):
260
+ # https://github.com/pandas-dev/pandas/issues/24845
261
+
262
+ c1 = pd.CategoricalIndex(["a", "a"], categories=["a", "b"], ordered=False)
263
+ c2 = pd.CategoricalIndex(["b", "b"], categories=["b", "a"], ordered=False)
264
+ c3 = pd.CategoricalIndex(
265
+ ["a", "a", "b", "b"], categories=["a", "b"], ordered=False
266
+ )
267
+
268
+ df1 = DataFrame({"A": [1, 2]}, index=c1)
269
+ df2 = DataFrame({"A": [3, 4]}, index=c2)
270
+
271
+ result = pd.concat((df1, df2))
272
+ expected = DataFrame({"A": [1, 2, 3, 4]}, index=c3)
273
+ tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py ADDED
@@ -0,0 +1,912 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import (
2
+ abc,
3
+ deque,
4
+ )
5
+ from collections.abc import Iterator
6
+ from datetime import datetime
7
+ from decimal import Decimal
8
+
9
+ import numpy as np
10
+ import pytest
11
+
12
+ from pandas.errors import InvalidIndexError
13
+ import pandas.util._test_decorators as td
14
+
15
+ import pandas as pd
16
+ from pandas import (
17
+ DataFrame,
18
+ Index,
19
+ MultiIndex,
20
+ PeriodIndex,
21
+ Series,
22
+ concat,
23
+ date_range,
24
+ )
25
+ import pandas._testing as tm
26
+ from pandas.core.arrays import SparseArray
27
+ from pandas.tests.extension.decimal import to_decimal
28
+
29
+
30
+ class TestConcatenate:
31
def test_append_concat(self):
    """Concatenating two PeriodIndex-ed Series keeps a PeriodIndex (GH#1815)."""
    d1 = date_range("12/31/1990", "12/31/1999", freq="YE-DEC")
    d2 = date_range("12/31/2000", "12/31/2009", freq="YE-DEC")

    first = Series(np.random.default_rng(2).standard_normal(10), d1).to_period()
    second = Series(np.random.default_rng(2).standard_normal(10), d2).to_period()

    # drops index
    result = concat([first, second])
    assert isinstance(result.index, PeriodIndex)
    assert result.index[0] == first.index[0]
46
+
47
+ def test_concat_copy(self, using_array_manager, using_copy_on_write):
48
+ df = DataFrame(np.random.default_rng(2).standard_normal((4, 3)))
49
+ df2 = DataFrame(np.random.default_rng(2).integers(0, 10, size=4).reshape(4, 1))
50
+ df3 = DataFrame({5: "foo"}, index=range(4))
51
+
52
+ # These are actual copies.
53
+ result = concat([df, df2, df3], axis=1, copy=True)
54
+
55
+ if not using_copy_on_write:
56
+ for arr in result._mgr.arrays:
57
+ assert not any(
58
+ np.shares_memory(arr, y)
59
+ for x in [df, df2, df3]
60
+ for y in x._mgr.arrays
61
+ )
62
+ else:
63
+ for arr in result._mgr.arrays:
64
+ assert arr.base is not None
65
+
66
+ # These are the same.
67
+ result = concat([df, df2, df3], axis=1, copy=False)
68
+
69
+ for arr in result._mgr.arrays:
70
+ if arr.dtype.kind == "f":
71
+ assert arr.base is df._mgr.arrays[0].base
72
+ elif arr.dtype.kind in ["i", "u"]:
73
+ assert arr.base is df2._mgr.arrays[0].base
74
+ elif arr.dtype == object:
75
+ if using_array_manager:
76
+ # we get the same array object, which has no base
77
+ assert arr is df3._mgr.arrays[0]
78
+ else:
79
+ assert arr.base is not None
80
+
81
+ # Float block was consolidated.
82
+ df4 = DataFrame(np.random.default_rng(2).standard_normal((4, 1)))
83
+ result = concat([df, df2, df3, df4], axis=1, copy=False)
84
+ for arr in result._mgr.arrays:
85
+ if arr.dtype.kind == "f":
86
+ if using_array_manager or using_copy_on_write:
87
+ # this is a view on some array in either df or df4
88
+ assert any(
89
+ np.shares_memory(arr, other)
90
+ for other in df._mgr.arrays + df4._mgr.arrays
91
+ )
92
+ else:
93
+ # the block was consolidated, so we got a copy anyway
94
+ assert arr.base is None
95
+ elif arr.dtype.kind in ["i", "u"]:
96
+ assert arr.base is df2._mgr.arrays[0].base
97
+ elif arr.dtype == object:
98
+ # this is a view on df3
99
+ assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
100
+
101
+ def test_concat_with_group_keys(self):
102
+ # axis=0
103
+ df = DataFrame(np.random.default_rng(2).standard_normal((3, 4)))
104
+ df2 = DataFrame(np.random.default_rng(2).standard_normal((4, 4)))
105
+
106
+ result = concat([df, df2], keys=[0, 1])
107
+ exp_index = MultiIndex.from_arrays(
108
+ [[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
109
+ )
110
+ expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
111
+ tm.assert_frame_equal(result, expected)
112
+
113
+ result = concat([df, df], keys=[0, 1])
114
+ exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
115
+ expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
116
+ tm.assert_frame_equal(result, expected)
117
+
118
+ # axis=1
119
+ df = DataFrame(np.random.default_rng(2).standard_normal((4, 3)))
120
+ df2 = DataFrame(np.random.default_rng(2).standard_normal((4, 4)))
121
+
122
+ result = concat([df, df2], keys=[0, 1], axis=1)
123
+ expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
124
+ tm.assert_frame_equal(result, expected)
125
+
126
+ result = concat([df, df], keys=[0, 1], axis=1)
127
+ expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
128
+ tm.assert_frame_equal(result, expected)
129
+
130
+ def test_concat_keys_specific_levels(self):
131
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
132
+ pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
133
+ level = ["three", "two", "one", "zero"]
134
+ result = concat(
135
+ pieces,
136
+ axis=1,
137
+ keys=["one", "two", "three"],
138
+ levels=[level],
139
+ names=["group_key"],
140
+ )
141
+
142
+ tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
143
+ tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
144
+
145
+ assert result.columns.names == ["group_key", None]
146
+
147
+ @pytest.mark.parametrize("mapping", ["mapping", "dict"])
148
+ def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
149
+ constructor = dict if mapping == "dict" else non_dict_mapping_subclass
150
+ frames = constructor(
151
+ {
152
+ "foo": DataFrame(np.random.default_rng(2).standard_normal((4, 3))),
153
+ "bar": DataFrame(np.random.default_rng(2).standard_normal((4, 3))),
154
+ "baz": DataFrame(np.random.default_rng(2).standard_normal((4, 3))),
155
+ "qux": DataFrame(np.random.default_rng(2).standard_normal((4, 3))),
156
+ }
157
+ )
158
+
159
+ sorted_keys = list(frames.keys())
160
+
161
+ result = concat(frames)
162
+ expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
163
+ tm.assert_frame_equal(result, expected)
164
+
165
+ result = concat(frames, axis=1)
166
+ expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
167
+ tm.assert_frame_equal(result, expected)
168
+
169
+ keys = ["baz", "foo", "bar"]
170
+ result = concat(frames, keys=keys)
171
+ expected = concat([frames[k] for k in keys], keys=keys)
172
+ tm.assert_frame_equal(result, expected)
173
+
174
+ def test_concat_keys_and_levels(self):
175
+ df = DataFrame(np.random.default_rng(2).standard_normal((1, 3)))
176
+ df2 = DataFrame(np.random.default_rng(2).standard_normal((1, 4)))
177
+
178
+ levels = [["foo", "baz"], ["one", "two"]]
179
+ names = ["first", "second"]
180
+ result = concat(
181
+ [df, df2, df, df2],
182
+ keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
183
+ levels=levels,
184
+ names=names,
185
+ )
186
+ expected = concat([df, df2, df, df2])
187
+ exp_index = MultiIndex(
188
+ levels=levels + [[0]],
189
+ codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
190
+ names=names + [None],
191
+ )
192
+ expected.index = exp_index
193
+
194
+ tm.assert_frame_equal(result, expected)
195
+
196
+ # no names
197
+ result = concat(
198
+ [df, df2, df, df2],
199
+ keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
200
+ levels=levels,
201
+ )
202
+ assert result.index.names == (None,) * 3
203
+
204
+ # no levels
205
+ result = concat(
206
+ [df, df2, df, df2],
207
+ keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
208
+ names=["first", "second"],
209
+ )
210
+ assert result.index.names == ("first", "second", None)
211
+ tm.assert_index_equal(
212
+ result.index.levels[0], Index(["baz", "foo"], name="first")
213
+ )
214
+
215
+ def test_concat_keys_levels_no_overlap(self):
216
+ # GH #1406
217
+ df = DataFrame(np.random.default_rng(2).standard_normal((1, 3)), index=["a"])
218
+ df2 = DataFrame(np.random.default_rng(2).standard_normal((1, 4)), index=["b"])
219
+
220
+ msg = "Values not found in passed level"
221
+ with pytest.raises(ValueError, match=msg):
222
+ concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
223
+
224
+ msg = "Key one not in level"
225
+ with pytest.raises(ValueError, match=msg):
226
+ concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
227
+
228
def test_crossed_dtypes_weird_corner(self):
    """Row-wise concat of frames whose columns swap f8/i8 dtypes matches the
    raw ndarray concatenation; keys/names survive on a second concat.
    """
    columns = ["A", "B", "C", "D"]

    def make_frame(dtypes):
        # one 4-element column per (name, dtype) pair, in fixed column order
        data = {
            col: np.array([1, 2, 3, 4], dtype=dt) for col, dt in zip(columns, dtypes)
        }
        return DataFrame(data, columns=columns)

    df1 = make_frame(["f8", "i8", "f8", "i8"])
    df2 = make_frame(["i8", "f8", "i8", "f8"])

    appended = concat([df1, df2], ignore_index=True)
    expected = DataFrame(
        np.concatenate([df1.values, df2.values], axis=0), columns=columns
    )
    tm.assert_frame_equal(appended, expected)

    df = DataFrame(np.random.default_rng(2).standard_normal((1, 3)), index=["a"])
    df2 = DataFrame(np.random.default_rng(2).standard_normal((1, 4)), index=["b"])
    result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
    assert result.index.names == ("first", "second")
260
+
261
+ def test_with_mixed_tuples(self, sort):
262
+ # 10697
263
+ # columns have mixed tuples, so handle properly
264
+ df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
265
+ df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
266
+
267
+ # it works
268
+ concat([df1, df2], sort=sort)
269
+
270
+ def test_concat_mixed_objs_columns(self):
271
+ # Test column-wise concat for mixed series/frames (axis=1)
272
+ # G2385
273
+
274
+ index = date_range("01-Jan-2013", periods=10, freq="h")
275
+ arr = np.arange(10, dtype="int64")
276
+ s1 = Series(arr, index=index)
277
+ s2 = Series(arr, index=index)
278
+ df = DataFrame(arr.reshape(-1, 1), index=index)
279
+
280
+ expected = DataFrame(
281
+ np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
282
+ )
283
+ result = concat([df, df], axis=1)
284
+ tm.assert_frame_equal(result, expected)
285
+
286
+ expected = DataFrame(
287
+ np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
288
+ )
289
+ result = concat([s1, s2], axis=1)
290
+ tm.assert_frame_equal(result, expected)
291
+
292
+ expected = DataFrame(
293
+ np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
294
+ )
295
+ result = concat([s1, s2, s1], axis=1)
296
+ tm.assert_frame_equal(result, expected)
297
+
298
+ expected = DataFrame(
299
+ np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
300
+ )
301
+ result = concat([s1, df, s2, s2, s1], axis=1)
302
+ tm.assert_frame_equal(result, expected)
303
+
304
+ # with names
305
+ s1.name = "foo"
306
+ expected = DataFrame(
307
+ np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
308
+ )
309
+ result = concat([s1, df, s2], axis=1)
310
+ tm.assert_frame_equal(result, expected)
311
+
312
+ s2.name = "bar"
313
+ expected = DataFrame(
314
+ np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
315
+ )
316
+ result = concat([s1, df, s2], axis=1)
317
+ tm.assert_frame_equal(result, expected)
318
+
319
+ # ignore index
320
+ expected = DataFrame(
321
+ np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
322
+ )
323
+ result = concat([s1, df, s2], axis=1, ignore_index=True)
324
+ tm.assert_frame_equal(result, expected)
325
+
326
+ def test_concat_mixed_objs_index(self):
327
+ # Test row-wise concat for mixed series/frames with a common name
328
+ # GH2385, GH15047
329
+
330
+ index = date_range("01-Jan-2013", periods=10, freq="h")
331
+ arr = np.arange(10, dtype="int64")
332
+ s1 = Series(arr, index=index)
333
+ s2 = Series(arr, index=index)
334
+ df = DataFrame(arr.reshape(-1, 1), index=index)
335
+
336
+ expected = DataFrame(
337
+ np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
338
+ )
339
+ result = concat([s1, df, s2])
340
+ tm.assert_frame_equal(result, expected)
341
+
342
+ def test_concat_mixed_objs_index_names(self):
343
+ # Test row-wise concat for mixed series/frames with distinct names
344
+ # GH2385, GH15047
345
+
346
+ index = date_range("01-Jan-2013", periods=10, freq="h")
347
+ arr = np.arange(10, dtype="int64")
348
+ s1 = Series(arr, index=index, name="foo")
349
+ s2 = Series(arr, index=index, name="bar")
350
+ df = DataFrame(arr.reshape(-1, 1), index=index)
351
+
352
+ expected = DataFrame(
353
+ np.kron(np.where(np.identity(3) == 1, 1, np.nan), arr).T,
354
+ index=index.tolist() * 3,
355
+ columns=["foo", 0, "bar"],
356
+ )
357
+ result = concat([s1, df, s2])
358
+ tm.assert_frame_equal(result, expected)
359
+
360
+ # Rename all series to 0 when ignore_index=True
361
+ expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
362
+ result = concat([s1, df, s2], ignore_index=True)
363
+ tm.assert_frame_equal(result, expected)
364
+
365
def test_dtype_coercion(self):
    """Concatenating single-row slices of a frame must not change its dtypes."""
    regression_frames = [
        # GH#12411: tz-aware timestamp plus NaT
        DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]}),
        # GH#12045: datetimes spanning an extreme range
        DataFrame({"date": [datetime(2012, 1, 1), datetime(1012, 1, 2)]}),
        # GH#11594: object column that is mostly missing
        DataFrame({"text": ["some words"] + [None] * 9}),
    ]
    for df in regression_frames:
        result = concat([df.iloc[[0]], df.iloc[[1]]])
        tm.assert_series_equal(result.dtypes, df.dtypes)
381
+
382
def test_concat_single_with_key(self):
    """concat of a single frame under a key prefixes the index with that key."""
    frame = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))

    result = concat([frame], keys=["foo"])
    # same layout as the "foo" half of a two-frame keyed concat
    expected = concat([frame, frame], keys=["foo", "bar"])
    tm.assert_frame_equal(result, expected[:10])
388
+
389
+ def test_concat_no_items_raises(self):
390
+ with pytest.raises(ValueError, match="No objects to concatenate"):
391
+ concat([])
392
+
393
+ def test_concat_exclude_none(self):
394
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))
395
+
396
+ pieces = [df[:5], None, None, df[5:]]
397
+ result = concat(pieces)
398
+ tm.assert_frame_equal(result, df)
399
+ with pytest.raises(ValueError, match="All objects passed were None"):
400
+ concat([None, None])
401
+
402
+ def test_concat_keys_with_none(self):
403
+ # #1649
404
+ df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
405
+
406
+ result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
407
+ expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0})
408
+ tm.assert_frame_equal(result, expected)
409
+
410
+ result = concat(
411
+ [None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
412
+ )
413
+ expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
414
+ tm.assert_frame_equal(result, expected)
415
+
416
def test_concat_bug_1719(self):
    """Outer-join column concat of different-length Series aligns both
    orderings to the same union index length (GH#1719).
    """
    full = Series(
        np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
    )
    thinned = full.copy()[::2]

    # to join with union; these two are of different length!
    left = concat([full, thinned], join="outer", axis=1)
    right = concat([thinned, full], join="outer", axis=1)

    assert len(left) == len(right)
428
+
429
def test_concat_bug_2972(self):
    """Column concat of two Series with the same name keeps the duplicate
    column label (GH#2972).
    """
    zeros = Series(np.zeros(5))
    ones = Series(np.ones(5))
    zeros.name = ones.name = "same name"
    result = concat([zeros, ones], axis=1)

    expected = DataFrame({0: zeros, 1: ones})
    expected.columns = ["same name", "same name"]
    tm.assert_frame_equal(result, expected)
438
+
439
+ def test_concat_bug_3602(self):
440
+ # GH 3602, duplicate columns
441
+ df1 = DataFrame(
442
+ {
443
+ "firmNo": [0, 0, 0, 0],
444
+ "prc": [6, 6, 6, 6],
445
+ "stringvar": ["rrr", "rrr", "rrr", "rrr"],
446
+ }
447
+ )
448
+ df2 = DataFrame(
449
+ {"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
450
+ )
451
+ expected = DataFrame(
452
+ [
453
+ [0, 6, "rrr", 9, 1, 6],
454
+ [0, 6, "rrr", 10, 2, 6],
455
+ [0, 6, "rrr", 11, 3, 6],
456
+ [0, 6, "rrr", 12, 4, 6],
457
+ ]
458
+ )
459
+ expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
460
+
461
+ result = concat([df1, df2], axis=1)
462
+ tm.assert_frame_equal(result, expected)
463
+
464
+ def test_concat_iterables(self):
465
+ # GH8645 check concat works with tuples, list, generators, and weird
466
+ # stuff like deque and custom iterables
467
+ df1 = DataFrame([1, 2, 3])
468
+ df2 = DataFrame([4, 5, 6])
469
+ expected = DataFrame([1, 2, 3, 4, 5, 6])
470
+ tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
471
+ tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
472
+ tm.assert_frame_equal(
473
+ concat((df for df in (df1, df2)), ignore_index=True), expected
474
+ )
475
+ tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
476
+
477
+ class CustomIterator1:
478
+ def __len__(self) -> int:
479
+ return 2
480
+
481
+ def __getitem__(self, index):
482
+ try:
483
+ return {0: df1, 1: df2}[index]
484
+ except KeyError as err:
485
+ raise IndexError from err
486
+
487
+ tm.assert_frame_equal(concat(CustomIterator1(), ignore_index=True), expected)
488
+
489
+ class CustomIterator2(abc.Iterable):
490
+ def __iter__(self) -> Iterator:
491
+ yield df1
492
+ yield df2
493
+
494
+ tm.assert_frame_equal(concat(CustomIterator2(), ignore_index=True), expected)
495
+
496
def test_concat_order(self):
    """Sorted concat of many frames yields deterministic column order.

    GH 17344, GH#47331
    """
    first = DataFrame(index=range(3), columns=["a", 1, None])
    rest = [DataFrame(index=range(3), columns=[None, 1, "a"]) for _ in range(100)]

    result = concat([first] + rest, sort=True).columns
    tm.assert_index_equal(result, Index([1, "a", None]))
504
+
505
+ def test_concat_different_extension_dtypes_upcasts(self):
506
+ a = Series(pd.array([1, 2], dtype="Int64"))
507
+ b = Series(to_decimal([1, 2]))
508
+
509
+ result = concat([a, b], ignore_index=True)
510
+ expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
511
+ tm.assert_series_equal(result, expected)
512
+
513
+ def test_concat_ordered_dict(self):
514
+ # GH 21510
515
+ expected = concat(
516
+ [Series(range(3)), Series(range(4))], keys=["First", "Another"]
517
+ )
518
+ result = concat({"First": Series(range(3)), "Another": Series(range(4))})
519
+ tm.assert_series_equal(result, expected)
520
+
521
+ def test_concat_duplicate_indices_raise(self):
522
+ # GH 45888: test raise for concat DataFrames with duplicate indices
523
+ # https://github.com/pandas-dev/pandas/issues/36263
524
+ df1 = DataFrame(
525
+ np.random.default_rng(2).standard_normal(5),
526
+ index=[0, 1, 2, 3, 3],
527
+ columns=["a"],
528
+ )
529
+ df2 = DataFrame(
530
+ np.random.default_rng(2).standard_normal(5),
531
+ index=[0, 1, 2, 2, 4],
532
+ columns=["b"],
533
+ )
534
+ msg = "Reindexing only valid with uniquely valued Index objects"
535
+ with pytest.raises(InvalidIndexError, match=msg):
536
+ concat([df1, df2], axis=1)
537
+
538
+
539
+ def test_concat_no_unnecessary_upcast(float_numpy_dtype, frame_or_series):
540
+ # GH 13247
541
+ dims = frame_or_series(dtype=object).ndim
542
+ dt = float_numpy_dtype
543
+
544
+ dfs = [
545
+ frame_or_series(np.array([1], dtype=dt, ndmin=dims)),
546
+ frame_or_series(np.array([np.nan], dtype=dt, ndmin=dims)),
547
+ frame_or_series(np.array([5], dtype=dt, ndmin=dims)),
548
+ ]
549
+ x = concat(dfs)
550
+ assert x.values.dtype == dt
551
+
552
+
553
+ @pytest.mark.parametrize("pdt", [Series, DataFrame])
554
+ def test_concat_will_upcast(pdt, any_signed_int_numpy_dtype):
555
+ dt = any_signed_int_numpy_dtype
556
+ dims = pdt().ndim
557
+ dfs = [
558
+ pdt(np.array([1], dtype=dt, ndmin=dims)),
559
+ pdt(np.array([np.nan], ndmin=dims)),
560
+ pdt(np.array([5], dtype=dt, ndmin=dims)),
561
+ ]
562
+ x = concat(dfs)
563
+ assert x.values.dtype == "float64"
564
+
565
+
566
def test_concat_empty_and_non_empty_frame_regression():
    """Concat with an empty frame keeps the values, upcast to float (GH 18178)."""
    non_empty = DataFrame({"foo": [1]})
    empty = DataFrame({"foo": []})
    result = concat([non_empty, empty])
    tm.assert_frame_equal(result, DataFrame({"foo": [1.0]}))
573
+
574
+
575
def test_concat_sparse():
    """Column-wise concat of sparse Series keeps the SparseDtype (GH 23557)."""
    sparse_ser = Series(SparseArray([0, 1, 2]))
    expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
        pd.SparseDtype(np.int64, 0)
    )
    result = concat([sparse_ser, sparse_ser], axis=1)
    tm.assert_frame_equal(result, expected)
583
+
584
+
585
def test_concat_dense_sparse():
    """Row-wise concat of sparse and dense Series yields sparse output (GH 30668)."""
    dtype = pd.SparseDtype(np.float64, None)
    sparse_ser = Series(pd.arrays.SparseArray([1, None]), dtype=dtype)
    dense_ser = Series([1], dtype=float)

    expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(dtype)
    result = concat([sparse_ser, dense_ser], axis=0)
    tm.assert_series_equal(result, expected)
593
+
594
+
595
+ @pytest.mark.parametrize("keys", [["e", "f", "f"], ["f", "e", "f"]])
596
+ def test_duplicate_keys(keys):
597
+ # GH 33654
598
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
599
+ s1 = Series([7, 8, 9], name="c")
600
+ s2 = Series([10, 11, 12], name="d")
601
+ result = concat([df, s1, s2], axis=1, keys=keys)
602
+ expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
603
+ expected_columns = MultiIndex.from_tuples(
604
+ [(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")]
605
+ )
606
+ expected = DataFrame(expected_values, columns=expected_columns)
607
+ tm.assert_frame_equal(result, expected)
608
+
609
+
610
def test_duplicate_keys_same_frame():
    """Duplicate keys on the same frame build the expected MultiIndex (GH 43595)."""
    keys = ["e", "e"]
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    result = concat([df, df], axis=1, keys=keys)

    expected_columns = MultiIndex.from_tuples(
        [(key, col) for key in keys for col in ("a", "b")]
    )
    expected = DataFrame(
        [[1, 4, 1, 4], [2, 5, 2, 5], [3, 6, 3, 6]], columns=expected_columns
    )
    tm.assert_frame_equal(result, expected)
621
+
622
+
623
+ @pytest.mark.filterwarnings(
624
+ "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning"
625
+ )
626
+ @pytest.mark.parametrize(
627
+ "obj",
628
+ [
629
+ tm.SubclassedDataFrame({"A": np.arange(0, 10)}),
630
+ tm.SubclassedSeries(np.arange(0, 10), name="A"),
631
+ ],
632
+ )
633
+ def test_concat_preserves_subclass(obj):
634
+ # GH28330 -- preserve subclass
635
+
636
+ result = concat([obj, obj])
637
+ assert isinstance(result, type(obj))
638
+
639
+
640
def test_concat_frame_axis0_extension_dtypes():
    """Row-wise concat preserves the extension dtype (via common_dtype)
    regardless of which operand carries it.
    """
    ea_df = DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")})
    np_df = DataFrame({"a": np.array([4, 5, 6])})

    tm.assert_frame_equal(
        concat([ea_df, np_df], ignore_index=True),
        DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64"),
    )
    tm.assert_frame_equal(
        concat([np_df, ea_df], ignore_index=True),
        DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64"),
    )
652
+
653
+
654
def test_concat_preserves_extension_int64_dtype():
    """Concat of disjoint-column Int64 frames stays Int64, filling NA (GH 24768)."""
    left = DataFrame({"a": [-1]}, dtype="Int64")
    right = DataFrame({"b": [1]}, dtype="Int64")

    result = concat([left, right], ignore_index=True)
    expected = DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64")
    tm.assert_frame_equal(result, expected)
661
+
662
+
663
+ @pytest.mark.parametrize(
664
+ "dtype1,dtype2,expected_dtype",
665
+ [
666
+ ("bool", "bool", "bool"),
667
+ ("boolean", "bool", "boolean"),
668
+ ("bool", "boolean", "boolean"),
669
+ ("boolean", "boolean", "boolean"),
670
+ ],
671
+ )
672
+ def test_concat_bool_types(dtype1, dtype2, expected_dtype):
673
+ # GH 42800
674
+ ser1 = Series([True, False], dtype=dtype1)
675
+ ser2 = Series([False, True], dtype=dtype2)
676
+ result = concat([ser1, ser2], ignore_index=True)
677
+ expected = Series([True, False, False, True], dtype=expected_dtype)
678
+ tm.assert_series_equal(result, expected)
679
+
680
+
681
+ @pytest.mark.parametrize(
682
+ ("keys", "integrity"),
683
+ [
684
+ (["red"] * 3, True),
685
+ (["red"] * 3, False),
686
+ (["red", "blue", "red"], False),
687
+ (["red", "blue", "red"], True),
688
+ ],
689
+ )
690
+ def test_concat_repeated_keys(keys, integrity):
691
+ # GH: 20816
692
+ series_list = [Series({"a": 1}), Series({"b": 2}), Series({"c": 3})]
693
+ result = concat(series_list, keys=keys, verify_integrity=integrity)
694
+ tuples = list(zip(keys, ["a", "b", "c"]))
695
+ expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples))
696
+ tm.assert_series_equal(result, expected)
697
+
698
+
699
+ def test_concat_null_object_with_dti():
700
+ # GH#40841
701
+ dti = pd.DatetimeIndex(
702
+ ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)"
703
+ )
704
+ right = DataFrame(data={"C": [0.5274]}, index=dti)
705
+
706
+ idx = Index([None], dtype="object", name="Maybe Time (UTC)")
707
+ left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx)
708
+
709
+ result = concat([left, right], axis="columns")
710
+
711
+ exp_index = Index([None, dti[0]], dtype=object)
712
+ expected = DataFrame(
713
+ {
714
+ "A": np.array([None, np.nan], dtype=object),
715
+ "B": [np.nan, np.nan],
716
+ "C": [np.nan, 0.5274],
717
+ },
718
+ index=exp_index,
719
+ )
720
+ tm.assert_frame_equal(result, expected)
721
+
722
+
723
def test_concat_multiindex_with_empty_rangeindex():
    """A frame with an empty RangeIndex of columns does not clobber the other
    frame's MultiIndex columns (GH#41234).
    """
    mi = MultiIndex.from_tuples([("B", 1), ("C", 1)])
    with_cols = DataFrame([[1, 2]], columns=mi)
    no_cols = DataFrame(index=[1], columns=pd.RangeIndex(0))

    result = concat([with_cols, no_cols])
    expected = DataFrame([[1, 2], [np.nan, np.nan]], columns=mi)
    tm.assert_frame_equal(result, expected)
732
+
733
+
734
+ @pytest.mark.parametrize(
735
+ "data",
736
+ [
737
+ Series(data=[1, 2]),
738
+ DataFrame(
739
+ data={
740
+ "col1": [1, 2],
741
+ }
742
+ ),
743
+ DataFrame(dtype=float),
744
+ Series(dtype=float),
745
+ ],
746
+ )
747
+ def test_concat_drop_attrs(data):
748
+ # GH#41828
749
+ df1 = data.copy()
750
+ df1.attrs = {1: 1}
751
+ df2 = data.copy()
752
+ df2.attrs = {1: 2}
753
+ df = concat([df1, df2])
754
+ assert len(df.attrs) == 0
755
+
756
+
757
+ @pytest.mark.parametrize(
758
+ "data",
759
+ [
760
+ Series(data=[1, 2]),
761
+ DataFrame(
762
+ data={
763
+ "col1": [1, 2],
764
+ }
765
+ ),
766
+ DataFrame(dtype=float),
767
+ Series(dtype=float),
768
+ ],
769
+ )
770
+ def test_concat_retain_attrs(data):
771
+ # GH#41828
772
+ df1 = data.copy()
773
+ df1.attrs = {1: 1}
774
+ df2 = data.copy()
775
+ df2.attrs = {1: 1}
776
+ df = concat([df1, df2])
777
+ assert df.attrs[1] == 1
778
+
779
+
780
+ @td.skip_array_manager_invalid_test
781
+ @pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"])
782
+ @pytest.mark.parametrize("empty_dtype", [None, "float64", "object"])
783
+ def test_concat_ignore_empty_object_float(empty_dtype, df_dtype):
784
+ # https://github.com/pandas-dev/pandas/issues/45637
785
+ df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype)
786
+ empty = DataFrame(columns=["foo", "bar"], dtype=empty_dtype)
787
+
788
+ msg = "The behavior of DataFrame concatenation with empty or all-NA entries"
789
+ warn = None
790
+ if df_dtype == "datetime64[ns]" or (
791
+ df_dtype == "float64" and empty_dtype != "float64"
792
+ ):
793
+ warn = FutureWarning
794
+ with tm.assert_produces_warning(warn, match=msg):
795
+ result = concat([empty, df])
796
+ expected = df
797
+ if df_dtype == "int64":
798
+ # TODO what exact behaviour do we want for integer eventually?
799
+ if empty_dtype == "float64":
800
+ expected = df.astype("float64")
801
+ else:
802
+ expected = df.astype("object")
803
+ tm.assert_frame_equal(result, expected)
804
+
805
+
806
+ @td.skip_array_manager_invalid_test
807
+ @pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"])
808
+ @pytest.mark.parametrize("empty_dtype", [None, "float64", "object"])
809
+ def test_concat_ignore_all_na_object_float(empty_dtype, df_dtype):
810
+ df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype)
811
+ empty = DataFrame({"foo": [np.nan], "bar": [np.nan]}, dtype=empty_dtype)
812
+
813
+ if df_dtype == "int64":
814
+ # TODO what exact behaviour do we want for integer eventually?
815
+ if empty_dtype == "object":
816
+ df_dtype = "object"
817
+ else:
818
+ df_dtype = "float64"
819
+
820
+ msg = "The behavior of DataFrame concatenation with empty or all-NA entries"
821
+ warn = None
822
+ if empty_dtype != df_dtype and empty_dtype is not None:
823
+ warn = FutureWarning
824
+ elif df_dtype == "datetime64[ns]":
825
+ warn = FutureWarning
826
+
827
+ with tm.assert_produces_warning(warn, match=msg):
828
+ result = concat([empty, df], ignore_index=True)
829
+
830
+ expected = DataFrame({"foo": [np.nan, 1, 2], "bar": [np.nan, 1, 2]}, dtype=df_dtype)
831
+ tm.assert_frame_equal(result, expected)
832
+
833
+
834
@td.skip_array_manager_invalid_test
def test_concat_ignore_empty_from_reindex():
    """Reindex-created all-NA columns are ignored for dtype inference,
    with a deprecation warning.

    https://github.com/pandas-dev/pandas/pull/43507#issuecomment-920375856
    """
    df1 = DataFrame({"a": [1], "b": [pd.Timestamp("2012-01-01")]})
    df2 = DataFrame({"a": [2]})

    # Reindexing adds an all-NaT "b" column to df2.
    aligned = df2.reindex(columns=df1.columns)

    msg = "The behavior of DataFrame concatenation with empty or all-NA entries"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = concat([df1, aligned], ignore_index=True)
    # BUG FIX: the original used a chained assignment
    # ``expected = df1 = DataFrame(...)`` which accidentally rebound ``df1``;
    # only ``expected`` should be assigned.
    expected = DataFrame({"a": [1, 2], "b": [pd.Timestamp("2012-01-01"), pd.NaT]})
    tm.assert_frame_equal(result, expected)
847
+
848
+
849
+ def test_concat_mismatched_keys_length():
850
+ # GH#43485
851
+ ser = Series(range(5))
852
+ sers = [ser + n for n in range(4)]
853
+ keys = ["A", "B", "C"]
854
+
855
+ msg = r"The behavior of pd.concat with len\(keys\) != len\(objs\) is deprecated"
856
+ with tm.assert_produces_warning(FutureWarning, match=msg):
857
+ concat(sers, keys=keys, axis=1)
858
+ with tm.assert_produces_warning(FutureWarning, match=msg):
859
+ concat(sers, keys=keys, axis=0)
860
+ with tm.assert_produces_warning(FutureWarning, match=msg):
861
+ concat((x for x in sers), keys=(y for y in keys), axis=1)
862
+ with tm.assert_produces_warning(FutureWarning, match=msg):
863
+ concat((x for x in sers), keys=(y for y in keys), axis=0)
864
+
865
+
866
def test_concat_multiindex_with_category():
    """Concat preserves categorical levels of a MultiIndex built via set_index."""

    def make_frame(values):
        # two categorical key columns plus an integer payload, indexed by the keys
        return DataFrame(
            {
                "c1": Series(list("abc"), dtype="category"),
                "c2": Series(list("eee"), dtype="category"),
                "i2": Series(values),
            }
        ).set_index(["c1", "c2"])

    result = concat([make_frame([1, 2, 3]), make_frame([4, 5, 6])])
    expected = DataFrame(
        {
            "c1": Series(list("abcabc"), dtype="category"),
            "c2": Series(list("eeeeee"), dtype="category"),
            "i2": Series([1, 2, 3, 4, 5, 6]),
        }
    ).set_index(["c1", "c2"])
    tm.assert_frame_equal(result, expected)
893
+
894
+
895
def test_concat_ea_upcast():
    """Concat of string-dtype and Int64-dtype frames falls back to object
    dtype (GH#54848).
    """
    str_frame = DataFrame(["a"], dtype="string")
    int_frame = DataFrame([1], dtype="Int64")

    result = concat([str_frame, int_frame])
    expected = DataFrame(["a", 1], index=[0, 0])
    tm.assert_frame_equal(result, expected)
902
+
903
+
904
+ def test_concat_none_with_timezone_timestamp():
905
+ # GH#52093
906
+ df1 = DataFrame([{"A": None}])
907
+ df2 = DataFrame([{"A": pd.Timestamp("1990-12-20 00:00:00+00:00")}])
908
+ msg = "The behavior of DataFrame concatenation with empty or all-NA entries"
909
+ with tm.assert_produces_warning(FutureWarning, match=msg):
910
+ result = concat([df1, df2], ignore_index=True)
911
+ expected = DataFrame({"A": [None, pd.Timestamp("1990-12-20 00:00:00+00:00")]})
912
+ tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ DataFrame,
7
+ Index,
8
+ Series,
9
+ concat,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+
14
+ class TestDataFrameConcat:
15
+ def test_concat_multiple_frames_dtypes(self):
16
+ # GH#2759
17
+ df1 = DataFrame(data=np.ones((10, 2)), columns=["foo", "bar"], dtype=np.float64)
18
+ df2 = DataFrame(data=np.ones((10, 2)), dtype=np.float32)
19
+ results = concat((df1, df2), axis=1).dtypes
20
+ expected = Series(
21
+ [np.dtype("float64")] * 2 + [np.dtype("float32")] * 2,
22
+ index=["foo", "bar", 0, 1],
23
+ )
24
+ tm.assert_series_equal(results, expected)
25
+
26
+ def test_concat_tuple_keys(self):
27
+ # GH#14438
28
+ df1 = DataFrame(np.ones((2, 2)), columns=list("AB"))
29
+ df2 = DataFrame(np.ones((3, 2)) * 2, columns=list("AB"))
30
+ results = concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")])
31
+ expected = DataFrame(
32
+ {
33
+ "A": {
34
+ ("bee", "bah", 0): 1.0,
35
+ ("bee", "bah", 1): 1.0,
36
+ ("bee", "boo", 0): 2.0,
37
+ ("bee", "boo", 1): 2.0,
38
+ ("bee", "boo", 2): 2.0,
39
+ },
40
+ "B": {
41
+ ("bee", "bah", 0): 1.0,
42
+ ("bee", "bah", 1): 1.0,
43
+ ("bee", "boo", 0): 2.0,
44
+ ("bee", "boo", 1): 2.0,
45
+ ("bee", "boo", 2): 2.0,
46
+ },
47
+ }
48
+ )
49
+ tm.assert_frame_equal(results, expected)
50
+
51
+ def test_concat_named_keys(self):
52
+ # GH#14252
53
+ df = DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]})
54
+ index = Index(["a", "b"], name="baz")
55
+ concatted_named_from_keys = concat([df, df], keys=index)
56
+ expected_named = DataFrame(
57
+ {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
58
+ index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]),
59
+ )
60
+ tm.assert_frame_equal(concatted_named_from_keys, expected_named)
61
+
62
+ index_no_name = Index(["a", "b"], name=None)
63
+ concatted_named_from_names = concat([df, df], keys=index_no_name, names=["baz"])
64
+ tm.assert_frame_equal(concatted_named_from_names, expected_named)
65
+
66
+ concatted_unnamed = concat([df, df], keys=index_no_name)
67
+ expected_unnamed = DataFrame(
68
+ {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]},
69
+ index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]),
70
+ )
71
+ tm.assert_frame_equal(concatted_unnamed, expected_unnamed)
72
+
73
+ def test_concat_axis_parameter(self):
74
+ # GH#14369
75
+ df1 = DataFrame({"A": [0.1, 0.2]}, index=range(2))
76
+ df2 = DataFrame({"A": [0.3, 0.4]}, index=range(2))
77
+
78
+ # Index/row/0 DataFrame
79
+ expected_index = DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1])
80
+
81
+ concatted_index = concat([df1, df2], axis="index")
82
+ tm.assert_frame_equal(concatted_index, expected_index)
83
+
84
+ concatted_row = concat([df1, df2], axis="rows")
85
+ tm.assert_frame_equal(concatted_row, expected_index)
86
+
87
+ concatted_0 = concat([df1, df2], axis=0)
88
+ tm.assert_frame_equal(concatted_0, expected_index)
89
+
90
+ # Columns/1 DataFrame
91
+ expected_columns = DataFrame(
92
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"]
93
+ )
94
+
95
+ concatted_columns = concat([df1, df2], axis="columns")
96
+ tm.assert_frame_equal(concatted_columns, expected_columns)
97
+
98
+ concatted_1 = concat([df1, df2], axis=1)
99
+ tm.assert_frame_equal(concatted_1, expected_columns)
100
+
101
+ series1 = Series([0.1, 0.2])
102
+ series2 = Series([0.3, 0.4])
103
+
104
+ # Index/row/0 Series
105
+ expected_index_series = Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1])
106
+
107
+ concatted_index_series = concat([series1, series2], axis="index")
108
+ tm.assert_series_equal(concatted_index_series, expected_index_series)
109
+
110
+ concatted_row_series = concat([series1, series2], axis="rows")
111
+ tm.assert_series_equal(concatted_row_series, expected_index_series)
112
+
113
+ concatted_0_series = concat([series1, series2], axis=0)
114
+ tm.assert_series_equal(concatted_0_series, expected_index_series)
115
+
116
+ # Columns/1 Series
117
+ expected_columns_series = DataFrame(
118
+ [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1]
119
+ )
120
+
121
+ concatted_columns_series = concat([series1, series2], axis="columns")
122
+ tm.assert_frame_equal(concatted_columns_series, expected_columns_series)
123
+
124
+ concatted_1_series = concat([series1, series2], axis=1)
125
+ tm.assert_frame_equal(concatted_1_series, expected_columns_series)
126
+
127
+ # Testing ValueError
128
+ with pytest.raises(ValueError, match="No axis named"):
129
+ concat([series1, series2], axis="something")
130
+
131
+ def test_concat_numerical_names(self):
132
+ # GH#15262, GH#12223
133
+ df = DataFrame(
134
+ {"col": range(9)},
135
+ dtype="int32",
136
+ index=(
137
+ pd.MultiIndex.from_product(
138
+ [["A0", "A1", "A2"], ["B0", "B1", "B2"]], names=[1, 2]
139
+ )
140
+ ),
141
+ )
142
+ result = concat((df.iloc[:2, :], df.iloc[-2:, :]))
143
+ expected = DataFrame(
144
+ {"col": [0, 1, 7, 8]},
145
+ dtype="int32",
146
+ index=pd.MultiIndex.from_tuples(
147
+ [("A0", "B0"), ("A0", "B1"), ("A2", "B1"), ("A2", "B2")], names=[1, 2]
148
+ ),
149
+ )
150
+ tm.assert_frame_equal(result, expected)
151
+
152
+ def test_concat_astype_dup_col(self):
153
+ # GH#23049
154
+ df = DataFrame([{"a": "b"}])
155
+ df = concat([df, df], axis=1)
156
+
157
+ result = df.astype("category")
158
+ expected = DataFrame(
159
+ np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"]
160
+ ).astype("category")
161
+ tm.assert_frame_equal(result, expected)
162
+
163
+ def test_concat_dataframe_keys_bug(self, sort):
164
+ t1 = DataFrame(
165
+ {"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
166
+ )
167
+ t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
168
+
169
+ # it works
170
+ result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
171
+ assert list(result.columns) == [("t1", "value"), ("t2", "value")]
172
+
173
+ def test_concat_bool_with_int(self):
174
+ # GH#42092 we may want to change this to return object, but that
175
+ # would need a deprecation
176
+ df1 = DataFrame(Series([True, False, True, True], dtype="bool"))
177
+ df2 = DataFrame(Series([1, 0, 1], dtype="int64"))
178
+
179
+ result = concat([df1, df2])
180
+ expected = concat([df1.astype("int64"), df2])
181
+ tm.assert_frame_equal(result, expected)
182
+
183
+ def test_concat_duplicates_in_index_with_keys(self):
184
+ # GH#42651
185
+ index = [1, 1, 3]
186
+ data = [1, 2, 3]
187
+
188
+ df = DataFrame(data=data, index=index)
189
+ result = concat([df], keys=["A"], names=["ID", "date"])
190
+ mi = pd.MultiIndex.from_product([["A"], index], names=["ID", "date"])
191
+ expected = DataFrame(data=data, index=mi)
192
+ tm.assert_frame_equal(result, expected)
193
+ tm.assert_index_equal(result.index.levels[1], Index([1, 3], name="date"))
194
+
195
+ @pytest.mark.parametrize("ignore_index", [True, False])
196
+ @pytest.mark.parametrize("order", ["C", "F"])
197
+ @pytest.mark.parametrize("axis", [0, 1])
198
+ def test_concat_copies(self, axis, order, ignore_index, using_copy_on_write):
199
+ # based on asv ConcatDataFrames
200
+ df = DataFrame(np.zeros((10, 5), dtype=np.float32, order=order))
201
+
202
+ res = concat([df] * 5, axis=axis, ignore_index=ignore_index, copy=True)
203
+
204
+ if not using_copy_on_write:
205
+ for arr in res._iter_column_arrays():
206
+ for arr2 in df._iter_column_arrays():
207
+ assert not np.shares_memory(arr, arr2)
208
+
209
+ def test_outer_sort_columns(self):
210
+ # GH#47127
211
+ df1 = DataFrame({"A": [0], "B": [1], 0: 1})
212
+ df2 = DataFrame({"A": [100]})
213
+ result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
214
+ expected = DataFrame({0: [1.0, np.nan], "A": [0, 100], "B": [1.0, np.nan]})
215
+ tm.assert_frame_equal(result, expected)
216
+
217
+ def test_inner_sort_columns(self):
218
+ # GH#47127
219
+ df1 = DataFrame({"A": [0], "B": [1], 0: 1})
220
+ df2 = DataFrame({"A": [100], 0: 2})
221
+ result = concat([df1, df2], ignore_index=True, join="inner", sort=True)
222
+ expected = DataFrame({0: [1, 2], "A": [0, 100]})
223
+ tm.assert_frame_equal(result, expected)
224
+
225
+ def test_sort_columns_one_df(self):
226
+ # GH#47127
227
+ df1 = DataFrame({"A": [100], 0: 2})
228
+ result = concat([df1], ignore_index=True, join="inner", sort=True)
229
+ expected = DataFrame({0: [2], "A": [100]})
230
+ tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py ADDED
@@ -0,0 +1,606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime as dt
2
+ from datetime import datetime
3
+
4
+ import dateutil
5
+ import numpy as np
6
+ import pytest
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ DataFrame,
11
+ DatetimeIndex,
12
+ Index,
13
+ MultiIndex,
14
+ Series,
15
+ Timestamp,
16
+ concat,
17
+ date_range,
18
+ to_timedelta,
19
+ )
20
+ import pandas._testing as tm
21
+
22
+
23
+ class TestDatetimeConcat:
24
+ def test_concat_datetime64_block(self):
25
+ rng = date_range("1/1/2000", periods=10)
26
+
27
+ df = DataFrame({"time": rng})
28
+
29
+ result = concat([df, df])
30
+ assert (result.iloc[:10]["time"] == rng).all()
31
+ assert (result.iloc[10:]["time"] == rng).all()
32
+
33
+ def test_concat_datetime_datetime64_frame(self):
34
+ # GH#2624
35
+ rows = []
36
+ rows.append([datetime(2010, 1, 1), 1])
37
+ rows.append([datetime(2010, 1, 2), "hi"])
38
+
39
+ df2_obj = DataFrame.from_records(rows, columns=["date", "test"])
40
+
41
+ ind = date_range(start="2000/1/1", freq="D", periods=10)
42
+ df1 = DataFrame({"date": ind, "test": range(10)})
43
+
44
+ # it works!
45
+ concat([df1, df2_obj])
46
+
47
+ def test_concat_datetime_timezone(self):
48
+ # GH 18523
49
+ idx1 = date_range("2011-01-01", periods=3, freq="h", tz="Europe/Paris")
50
+ idx2 = date_range(start=idx1[0], end=idx1[-1], freq="h")
51
+ df1 = DataFrame({"a": [1, 2, 3]}, index=idx1)
52
+ df2 = DataFrame({"b": [1, 2, 3]}, index=idx2)
53
+ result = concat([df1, df2], axis=1)
54
+
55
+ exp_idx = DatetimeIndex(
56
+ [
57
+ "2011-01-01 00:00:00+01:00",
58
+ "2011-01-01 01:00:00+01:00",
59
+ "2011-01-01 02:00:00+01:00",
60
+ ],
61
+ dtype="M8[ns, Europe/Paris]",
62
+ freq="h",
63
+ )
64
+ expected = DataFrame(
65
+ [[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"]
66
+ )
67
+
68
+ tm.assert_frame_equal(result, expected)
69
+
70
+ idx3 = date_range("2011-01-01", periods=3, freq="h", tz="Asia/Tokyo")
71
+ df3 = DataFrame({"b": [1, 2, 3]}, index=idx3)
72
+ result = concat([df1, df3], axis=1)
73
+
74
+ exp_idx = DatetimeIndex(
75
+ [
76
+ "2010-12-31 15:00:00+00:00",
77
+ "2010-12-31 16:00:00+00:00",
78
+ "2010-12-31 17:00:00+00:00",
79
+ "2010-12-31 23:00:00+00:00",
80
+ "2011-01-01 00:00:00+00:00",
81
+ "2011-01-01 01:00:00+00:00",
82
+ ]
83
+ ).as_unit("ns")
84
+
85
+ expected = DataFrame(
86
+ [
87
+ [np.nan, 1],
88
+ [np.nan, 2],
89
+ [np.nan, 3],
90
+ [1, np.nan],
91
+ [2, np.nan],
92
+ [3, np.nan],
93
+ ],
94
+ index=exp_idx,
95
+ columns=["a", "b"],
96
+ )
97
+
98
+ tm.assert_frame_equal(result, expected)
99
+
100
+ # GH 13783: Concat after resample
101
+ result = concat([df1.resample("h").mean(), df2.resample("h").mean()], sort=True)
102
+ expected = DataFrame(
103
+ {"a": [1, 2, 3] + [np.nan] * 3, "b": [np.nan] * 3 + [1, 2, 3]},
104
+ index=idx1.append(idx1),
105
+ )
106
+ tm.assert_frame_equal(result, expected)
107
+
108
+ def test_concat_datetimeindex_freq(self):
109
+ # GH 3232
110
+ # Monotonic index result
111
+ dr = date_range("01-Jan-2013", periods=100, freq="50ms", tz="UTC")
112
+ data = list(range(100))
113
+ expected = DataFrame(data, index=dr)
114
+ result = concat([expected[:50], expected[50:]])
115
+ tm.assert_frame_equal(result, expected)
116
+
117
+ # Non-monotonic index result
118
+ result = concat([expected[50:], expected[:50]])
119
+ expected = DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50]))
120
+ expected.index._data.freq = None
121
+ tm.assert_frame_equal(result, expected)
122
+
123
+ def test_concat_multiindex_datetime_object_index(self):
124
+ # https://github.com/pandas-dev/pandas/issues/11058
125
+ idx = Index(
126
+ [dt.date(2013, 1, 1), dt.date(2014, 1, 1), dt.date(2015, 1, 1)],
127
+ dtype="object",
128
+ )
129
+
130
+ s = Series(
131
+ ["a", "b"],
132
+ index=MultiIndex.from_arrays(
133
+ [
134
+ [1, 2],
135
+ idx[:-1],
136
+ ],
137
+ names=["first", "second"],
138
+ ),
139
+ )
140
+ s2 = Series(
141
+ ["a", "b"],
142
+ index=MultiIndex.from_arrays(
143
+ [[1, 2], idx[::2]],
144
+ names=["first", "second"],
145
+ ),
146
+ )
147
+ mi = MultiIndex.from_arrays(
148
+ [[1, 2, 2], idx],
149
+ names=["first", "second"],
150
+ )
151
+ assert mi.levels[1].dtype == object
152
+
153
+ expected = DataFrame(
154
+ [["a", "a"], ["b", np.nan], [np.nan, "b"]],
155
+ index=mi,
156
+ )
157
+ result = concat([s, s2], axis=1)
158
+ tm.assert_frame_equal(result, expected)
159
+
160
+ def test_concat_NaT_series(self):
161
+ # GH 11693
162
+ # test for merging NaT series with datetime series.
163
+ x = Series(
164
+ date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="US/Eastern")
165
+ )
166
+ y = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
167
+ expected = Series([x[0], x[1], pd.NaT, pd.NaT])
168
+
169
+ result = concat([x, y], ignore_index=True)
170
+ tm.assert_series_equal(result, expected)
171
+
172
+ # all NaT with tz
173
+ expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns, US/Eastern]")
174
+ result = concat([y, y], ignore_index=True)
175
+ tm.assert_series_equal(result, expected)
176
+
177
+ def test_concat_NaT_series2(self):
178
+ # without tz
179
+ x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h"))
180
+ y = Series(date_range("20151124 10:00", "20151124 11:00", freq="1h"))
181
+ y[:] = pd.NaT
182
+ expected = Series([x[0], x[1], pd.NaT, pd.NaT])
183
+ result = concat([x, y], ignore_index=True)
184
+ tm.assert_series_equal(result, expected)
185
+
186
+ # all NaT without tz
187
+ x[:] = pd.NaT
188
+ expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns]")
189
+ result = concat([x, y], ignore_index=True)
190
+ tm.assert_series_equal(result, expected)
191
+
192
+ @pytest.mark.parametrize("tz", [None, "UTC"])
193
+ def test_concat_NaT_dataframes(self, tz):
194
+ # GH 12396
195
+
196
+ dti = DatetimeIndex([pd.NaT, pd.NaT], tz=tz)
197
+ first = DataFrame({0: dti})
198
+ second = DataFrame(
199
+ [[Timestamp("2015/01/01", tz=tz)], [Timestamp("2016/01/01", tz=tz)]],
200
+ index=[2, 3],
201
+ )
202
+ expected = DataFrame(
203
+ [
204
+ pd.NaT,
205
+ pd.NaT,
206
+ Timestamp("2015/01/01", tz=tz),
207
+ Timestamp("2016/01/01", tz=tz),
208
+ ]
209
+ )
210
+
211
+ result = concat([first, second], axis=0)
212
+ tm.assert_frame_equal(result, expected)
213
+
214
+ @pytest.mark.parametrize("tz1", [None, "UTC"])
215
+ @pytest.mark.parametrize("tz2", [None, "UTC"])
216
+ @pytest.mark.parametrize("item", [pd.NaT, Timestamp("20150101")])
217
+ def test_concat_NaT_dataframes_all_NaT_axis_0(
218
+ self, tz1, tz2, item, using_array_manager
219
+ ):
220
+ # GH 12396
221
+
222
+ # tz-naive
223
+ first = DataFrame([[pd.NaT], [pd.NaT]]).apply(lambda x: x.dt.tz_localize(tz1))
224
+ second = DataFrame([item]).apply(lambda x: x.dt.tz_localize(tz2))
225
+
226
+ result = concat([first, second], axis=0)
227
+ expected = DataFrame(Series([pd.NaT, pd.NaT, item], index=[0, 1, 0]))
228
+ expected = expected.apply(lambda x: x.dt.tz_localize(tz2))
229
+ if tz1 != tz2:
230
+ expected = expected.astype(object)
231
+ if item is pd.NaT and not using_array_manager:
232
+ # GH#18463
233
+ # TODO: setting nan here is to keep the test passing as we
234
+ # make assert_frame_equal stricter, but is nan really the
235
+ # ideal behavior here?
236
+ if tz1 is not None:
237
+ expected.iloc[-1, 0] = np.nan
238
+ else:
239
+ expected.iloc[:-1, 0] = np.nan
240
+
241
+ tm.assert_frame_equal(result, expected)
242
+
243
+ @pytest.mark.parametrize("tz1", [None, "UTC"])
244
+ @pytest.mark.parametrize("tz2", [None, "UTC"])
245
+ def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2):
246
+ # GH 12396
247
+
248
+ first = DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1))
249
+ second = DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1])
250
+ expected = DataFrame(
251
+ {
252
+ 0: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1),
253
+ 1: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2),
254
+ }
255
+ )
256
+ result = concat([first, second], axis=1)
257
+ tm.assert_frame_equal(result, expected)
258
+
259
+ @pytest.mark.parametrize("tz1", [None, "UTC"])
260
+ @pytest.mark.parametrize("tz2", [None, "UTC"])
261
+ def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2):
262
+ # GH 12396
263
+
264
+ # tz-naive
265
+ first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)
266
+ second = DataFrame(
267
+ [
268
+ [Timestamp("2015/01/01", tz=tz2)],
269
+ [Timestamp("2016/01/01", tz=tz2)],
270
+ ],
271
+ index=[2, 3],
272
+ )
273
+
274
+ expected = DataFrame(
275
+ [
276
+ pd.NaT,
277
+ pd.NaT,
278
+ Timestamp("2015/01/01", tz=tz2),
279
+ Timestamp("2016/01/01", tz=tz2),
280
+ ]
281
+ )
282
+ if tz1 != tz2:
283
+ expected = expected.astype(object)
284
+
285
+ result = concat([first, second])
286
+ tm.assert_frame_equal(result, expected)
287
+
288
+
289
+ class TestTimezoneConcat:
290
+ def test_concat_tz_series(self):
291
+ # gh-11755: tz and no tz
292
+ x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
293
+ y = Series(date_range("2012-01-01", "2012-01-02"))
294
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
295
+ result = concat([x, y], ignore_index=True)
296
+ tm.assert_series_equal(result, expected)
297
+
298
+ def test_concat_tz_series2(self):
299
+ # gh-11887: concat tz and object
300
+ x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
301
+ y = Series(["a", "b"])
302
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
303
+ result = concat([x, y], ignore_index=True)
304
+ tm.assert_series_equal(result, expected)
305
+
306
+ def test_concat_tz_series3(self, unit, unit2):
307
+ # see gh-12217 and gh-12306
308
+ # Concatenating two UTC times
309
+ first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]")
310
+ first[0] = first[0].dt.tz_localize("UTC")
311
+
312
+ second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]")
313
+ second[0] = second[0].dt.tz_localize("UTC")
314
+
315
+ result = concat([first, second])
316
+ exp_unit = tm.get_finest_unit(unit, unit2)
317
+ assert result[0].dtype == f"datetime64[{exp_unit}, UTC]"
318
+
319
+ def test_concat_tz_series4(self, unit, unit2):
320
+ # Concatenating two London times
321
+ first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]")
322
+ first[0] = first[0].dt.tz_localize("Europe/London")
323
+
324
+ second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]")
325
+ second[0] = second[0].dt.tz_localize("Europe/London")
326
+
327
+ result = concat([first, second])
328
+ exp_unit = tm.get_finest_unit(unit, unit2)
329
+ assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]"
330
+
331
+ def test_concat_tz_series5(self, unit, unit2):
332
+ # Concatenating 2+1 London times
333
+ first = DataFrame(
334
+ [[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]], dtype=f"M8[{unit}]"
335
+ )
336
+ first[0] = first[0].dt.tz_localize("Europe/London")
337
+
338
+ second = DataFrame([[datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]")
339
+ second[0] = second[0].dt.tz_localize("Europe/London")
340
+
341
+ result = concat([first, second])
342
+ exp_unit = tm.get_finest_unit(unit, unit2)
343
+ assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]"
344
+
345
+ def test_concat_tz_series6(self, unit, unit2):
346
+ # Concatenating 1+2 London times
347
+ first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]")
348
+ first[0] = first[0].dt.tz_localize("Europe/London")
349
+
350
+ second = DataFrame(
351
+ [[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]"
352
+ )
353
+ second[0] = second[0].dt.tz_localize("Europe/London")
354
+
355
+ result = concat([first, second])
356
+ exp_unit = tm.get_finest_unit(unit, unit2)
357
+ assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]"
358
+
359
+ def test_concat_tz_series_tzlocal(self):
360
+ # see gh-13583
361
+ x = [
362
+ Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()),
363
+ Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()),
364
+ ]
365
+ y = [
366
+ Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()),
367
+ Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()),
368
+ ]
369
+
370
+ result = concat([Series(x), Series(y)], ignore_index=True)
371
+ tm.assert_series_equal(result, Series(x + y))
372
+ assert result.dtype == "datetime64[ns, tzlocal()]"
373
+
374
+ def test_concat_tz_series_with_datetimelike(self):
375
+ # see gh-12620: tz and timedelta
376
+ x = [
377
+ Timestamp("2011-01-01", tz="US/Eastern"),
378
+ Timestamp("2011-02-01", tz="US/Eastern"),
379
+ ]
380
+ y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
381
+ result = concat([Series(x), Series(y)], ignore_index=True)
382
+ tm.assert_series_equal(result, Series(x + y, dtype="object"))
383
+
384
+ # tz and period
385
+ y = [pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M")]
386
+ result = concat([Series(x), Series(y)], ignore_index=True)
387
+ tm.assert_series_equal(result, Series(x + y, dtype="object"))
388
+
389
+ def test_concat_tz_frame(self):
390
+ df2 = DataFrame(
391
+ {
392
+ "A": Timestamp("20130102", tz="US/Eastern"),
393
+ "B": Timestamp("20130603", tz="CET"),
394
+ },
395
+ index=range(5),
396
+ )
397
+
398
+ # concat
399
+ df3 = concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
400
+ tm.assert_frame_equal(df2, df3)
401
+
402
+ def test_concat_multiple_tzs(self):
403
+ # GH#12467
404
+ # combining datetime tz-aware and naive DataFrames
405
+ ts1 = Timestamp("2015-01-01", tz=None)
406
+ ts2 = Timestamp("2015-01-01", tz="UTC")
407
+ ts3 = Timestamp("2015-01-01", tz="EST")
408
+
409
+ df1 = DataFrame({"time": [ts1]})
410
+ df2 = DataFrame({"time": [ts2]})
411
+ df3 = DataFrame({"time": [ts3]})
412
+
413
+ results = concat([df1, df2]).reset_index(drop=True)
414
+ expected = DataFrame({"time": [ts1, ts2]}, dtype=object)
415
+ tm.assert_frame_equal(results, expected)
416
+
417
+ results = concat([df1, df3]).reset_index(drop=True)
418
+ expected = DataFrame({"time": [ts1, ts3]}, dtype=object)
419
+ tm.assert_frame_equal(results, expected)
420
+
421
+ results = concat([df2, df3]).reset_index(drop=True)
422
+ expected = DataFrame({"time": [ts2, ts3]})
423
+ tm.assert_frame_equal(results, expected)
424
+
425
+ def test_concat_multiindex_with_tz(self):
426
+ # GH 6606
427
+ df = DataFrame(
428
+ {
429
+ "dt": DatetimeIndex(
430
+ [
431
+ datetime(2014, 1, 1),
432
+ datetime(2014, 1, 2),
433
+ datetime(2014, 1, 3),
434
+ ],
435
+ dtype="M8[ns, US/Pacific]",
436
+ ),
437
+ "b": ["A", "B", "C"],
438
+ "c": [1, 2, 3],
439
+ "d": [4, 5, 6],
440
+ }
441
+ )
442
+ df = df.set_index(["dt", "b"])
443
+
444
+ exp_idx1 = DatetimeIndex(
445
+ ["2014-01-01", "2014-01-02", "2014-01-03"] * 2,
446
+ dtype="M8[ns, US/Pacific]",
447
+ name="dt",
448
+ )
449
+ exp_idx2 = Index(["A", "B", "C"] * 2, name="b")
450
+ exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
451
+ expected = DataFrame(
452
+ {"c": [1, 2, 3] * 2, "d": [4, 5, 6] * 2}, index=exp_idx, columns=["c", "d"]
453
+ )
454
+
455
+ result = concat([df, df])
456
+ tm.assert_frame_equal(result, expected)
457
+
458
+ def test_concat_tz_not_aligned(self):
459
+ # GH#22796
460
+ ts = pd.to_datetime([1, 2]).tz_localize("UTC")
461
+ a = DataFrame({"A": ts})
462
+ b = DataFrame({"A": ts, "B": ts})
463
+ result = concat([a, b], sort=True, ignore_index=True)
464
+ expected = DataFrame(
465
+ {"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)}
466
+ )
467
+ tm.assert_frame_equal(result, expected)
468
+
469
+ @pytest.mark.parametrize(
470
+ "t1",
471
+ [
472
+ "2015-01-01",
473
+ pytest.param(
474
+ pd.NaT,
475
+ marks=pytest.mark.xfail(
476
+ reason="GH23037 incorrect dtype when concatenating"
477
+ ),
478
+ ),
479
+ ],
480
+ )
481
+ def test_concat_tz_NaT(self, t1):
482
+ # GH#22796
483
+ # Concatenating tz-aware multicolumn DataFrames
484
+ ts1 = Timestamp(t1, tz="UTC")
485
+ ts2 = Timestamp("2015-01-01", tz="UTC")
486
+ ts3 = Timestamp("2015-01-01", tz="UTC")
487
+
488
+ df1 = DataFrame([[ts1, ts2]])
489
+ df2 = DataFrame([[ts3]])
490
+
491
+ result = concat([df1, df2])
492
+ expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
493
+
494
+ tm.assert_frame_equal(result, expected)
495
+
496
+ def test_concat_tz_with_empty(self):
497
+ # GH 9188
498
+ result = concat(
499
+ [DataFrame(date_range("2000", periods=1, tz="UTC")), DataFrame()]
500
+ )
501
+ expected = DataFrame(date_range("2000", periods=1, tz="UTC"))
502
+ tm.assert_frame_equal(result, expected)
503
+
504
+
505
+ class TestPeriodConcat:
506
+ def test_concat_period_series(self):
507
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
508
+ y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="D"))
509
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="Period[D]")
510
+ result = concat([x, y], ignore_index=True)
511
+ tm.assert_series_equal(result, expected)
512
+
513
+ def test_concat_period_multiple_freq_series(self):
514
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
515
+ y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="M"))
516
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
517
+ result = concat([x, y], ignore_index=True)
518
+ tm.assert_series_equal(result, expected)
519
+ assert result.dtype == "object"
520
+
521
+ def test_concat_period_other_series(self):
522
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
523
+ y = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="M"))
524
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
525
+ result = concat([x, y], ignore_index=True)
526
+ tm.assert_series_equal(result, expected)
527
+ assert result.dtype == "object"
528
+
529
+ def test_concat_period_other_series2(self):
530
+ # non-period
531
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
532
+ y = Series(DatetimeIndex(["2015-11-01", "2015-12-01"]))
533
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
534
+ result = concat([x, y], ignore_index=True)
535
+ tm.assert_series_equal(result, expected)
536
+ assert result.dtype == "object"
537
+
538
+ def test_concat_period_other_series3(self):
539
+ x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
540
+ y = Series(["A", "B"])
541
+ expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
542
+ result = concat([x, y], ignore_index=True)
543
+ tm.assert_series_equal(result, expected)
544
+ assert result.dtype == "object"
545
+
546
+
547
+ def test_concat_timedelta64_block():
548
+ rng = to_timedelta(np.arange(10), unit="s")
549
+
550
+ df = DataFrame({"time": rng})
551
+
552
+ result = concat([df, df])
553
+ tm.assert_frame_equal(result.iloc[:10], df)
554
+ tm.assert_frame_equal(result.iloc[10:], df)
555
+
556
+
557
+ def test_concat_multiindex_datetime_nat():
558
+ # GH#44900
559
+ left = DataFrame({"a": 1}, index=MultiIndex.from_tuples([(1, pd.NaT)]))
560
+ right = DataFrame(
561
+ {"b": 2}, index=MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)])
562
+ )
563
+ result = concat([left, right], axis="columns")
564
+ expected = DataFrame(
565
+ {"a": [1.0, np.nan], "b": 2}, MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)])
566
+ )
567
+ tm.assert_frame_equal(result, expected)
568
+
569
+
570
+ def test_concat_float_datetime64(using_array_manager):
571
+ # GH#32934
572
+ df_time = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")})
573
+ df_float = DataFrame({"A": pd.array([1.0], dtype="float64")})
574
+
575
+ expected = DataFrame(
576
+ {
577
+ "A": [
578
+ pd.array(["2000"], dtype="datetime64[ns]")[0],
579
+ pd.array([1.0], dtype="float64")[0],
580
+ ]
581
+ },
582
+ index=[0, 0],
583
+ )
584
+ result = concat([df_time, df_float])
585
+ tm.assert_frame_equal(result, expected)
586
+
587
+ expected = DataFrame({"A": pd.array([], dtype="object")})
588
+ result = concat([df_time.iloc[:0], df_float.iloc[:0]])
589
+ tm.assert_frame_equal(result, expected)
590
+
591
+ expected = DataFrame({"A": pd.array([1.0], dtype="object")})
592
+ result = concat([df_time.iloc[:0], df_float])
593
+ tm.assert_frame_equal(result, expected)
594
+
595
+ if not using_array_manager:
596
+ expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")})
597
+ msg = "The behavior of DataFrame concatenation with empty or all-NA entries"
598
+ with tm.assert_produces_warning(FutureWarning, match=msg):
599
+ result = concat([df_time, df_float.iloc[:0]])
600
+ tm.assert_frame_equal(result, expected)
601
+ else:
602
+ expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")}).astype(
603
+ {"A": "object"}
604
+ )
605
+ result = concat([df_time, df_float.iloc[:0]])
606
+ tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py ADDED
@@ -0,0 +1,295 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ from pandas import (
6
+ DataFrame,
7
+ RangeIndex,
8
+ Series,
9
+ concat,
10
+ date_range,
11
+ )
12
+ import pandas._testing as tm
13
+
14
+
15
+ class TestEmptyConcat:
16
+ def test_handle_empty_objects(self, sort, using_infer_string):
17
+ df = DataFrame(
18
+ np.random.default_rng(2).standard_normal((10, 4)), columns=list("abcd")
19
+ )
20
+
21
+ dfcopy = df[:5].copy()
22
+ dfcopy["foo"] = "bar"
23
+ empty = df[5:5]
24
+
25
+ frames = [dfcopy, empty, empty, df[5:]]
26
+ concatted = concat(frames, axis=0, sort=sort)
27
+
28
+ expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
29
+ expected["foo"] = expected["foo"].astype(
30
+ object if not using_infer_string else "string[pyarrow_numpy]"
31
+ )
32
+ expected.loc[0:4, "foo"] = "bar"
33
+
34
+ tm.assert_frame_equal(concatted, expected)
35
+
36
+ # empty as first element with time series
37
+ # GH3259
38
+ df = DataFrame(
39
+ {"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s")
40
+ )
41
+ empty = DataFrame()
42
+ result = concat([df, empty], axis=1)
43
+ tm.assert_frame_equal(result, df)
44
+ result = concat([empty, df], axis=1)
45
+ tm.assert_frame_equal(result, df)
46
+
47
+ result = concat([df, empty])
48
+ tm.assert_frame_equal(result, df)
49
+ result = concat([empty, df])
50
+ tm.assert_frame_equal(result, df)
51
+
52
+ def test_concat_empty_series(self):
53
+ # GH 11082
54
+ s1 = Series([1, 2, 3], name="x")
55
+ s2 = Series(name="y", dtype="float64")
56
+ res = concat([s1, s2], axis=1)
57
+ exp = DataFrame(
58
+ {"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
59
+ index=RangeIndex(3),
60
+ )
61
+ tm.assert_frame_equal(res, exp)
62
+
63
+ s1 = Series([1, 2, 3], name="x")
64
+ s2 = Series(name="y", dtype="float64")
65
+ msg = "The behavior of array concatenation with empty entries is deprecated"
66
+ with tm.assert_produces_warning(FutureWarning, match=msg):
67
+ res = concat([s1, s2], axis=0)
68
+ # name will be reset
69
+ exp = Series([1, 2, 3])
70
+ tm.assert_series_equal(res, exp)
71
+
72
+ # empty Series with no name
73
+ s1 = Series([1, 2, 3], name="x")
74
+ s2 = Series(name=None, dtype="float64")
75
+ res = concat([s1, s2], axis=1)
76
+ exp = DataFrame(
77
+ {"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
78
+ columns=["x", 0],
79
+ index=RangeIndex(3),
80
+ )
81
+ tm.assert_frame_equal(res, exp)
82
+
83
+ @pytest.mark.parametrize("tz", [None, "UTC"])
84
+ @pytest.mark.parametrize("values", [[], [1, 2, 3]])
85
+ def test_concat_empty_series_timelike(self, tz, values):
86
+ # GH 18447
87
+
88
+ first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
89
+ dtype = None if values else np.float64
90
+ second = Series(values, dtype=dtype)
91
+
92
+ expected = DataFrame(
93
+ {
94
+ 0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
95
+ 1: values,
96
+ }
97
+ )
98
+ result = concat([first, second], axis=1)
99
+ tm.assert_frame_equal(result, expected)
100
+
101
+ @pytest.mark.parametrize(
102
+ "left,right,expected",
103
+ [
104
+ # booleans
105
+ (np.bool_, np.int32, np.object_), # changed from int32 in 2.0 GH#39817
106
+ (np.bool_, np.float32, np.object_),
107
+ # datetime-like
108
+ ("m8[ns]", np.bool_, np.object_),
109
+ ("m8[ns]", np.int64, np.object_),
110
+ ("M8[ns]", np.bool_, np.object_),
111
+ ("M8[ns]", np.int64, np.object_),
112
+ # categorical
113
+ ("category", "category", "category"),
114
+ ("category", "object", "object"),
115
+ ],
116
+ )
117
+ def test_concat_empty_series_dtypes(self, left, right, expected):
118
+ # GH#39817, GH#45101
119
+ result = concat([Series(dtype=left), Series(dtype=right)])
120
+ assert result.dtype == expected
121
+
122
+ @pytest.mark.parametrize(
123
+ "dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]
124
+ )
125
+ def test_concat_empty_series_dtypes_match_roundtrips(self, dtype):
126
+ dtype = np.dtype(dtype)
127
+
128
+ result = concat([Series(dtype=dtype)])
129
+ assert result.dtype == dtype
130
+
131
+ result = concat([Series(dtype=dtype), Series(dtype=dtype)])
132
+ assert result.dtype == dtype
133
+
134
+ @pytest.mark.parametrize("dtype", ["float64", "int8", "uint8", "m8[ns]", "M8[ns]"])
135
+ @pytest.mark.parametrize(
136
+ "dtype2",
137
+ ["float64", "int8", "uint8", "m8[ns]", "M8[ns]"],
138
+ )
139
+ def test_concat_empty_series_dtypes_roundtrips(self, dtype, dtype2):
140
+ # round-tripping with self & like self
141
+ if dtype == dtype2:
142
+ pytest.skip("same dtype is not applicable for test")
143
+
144
+ def int_result_type(dtype, dtype2):
145
+ typs = {dtype.kind, dtype2.kind}
146
+ if not len(typs - {"i", "u", "b"}) and (
147
+ dtype.kind == "i" or dtype2.kind == "i"
148
+ ):
149
+ return "i"
150
+ elif not len(typs - {"u", "b"}) and (
151
+ dtype.kind == "u" or dtype2.kind == "u"
152
+ ):
153
+ return "u"
154
+ return None
155
+
156
+ def float_result_type(dtype, dtype2):
157
+ typs = {dtype.kind, dtype2.kind}
158
+ if not len(typs - {"f", "i", "u"}) and (
159
+ dtype.kind == "f" or dtype2.kind == "f"
160
+ ):
161
+ return "f"
162
+ return None
163
+
164
+ def get_result_type(dtype, dtype2):
165
+ result = float_result_type(dtype, dtype2)
166
+ if result is not None:
167
+ return result
168
+ result = int_result_type(dtype, dtype2)
169
+ if result is not None:
170
+ return result
171
+ return "O"
172
+
173
+ dtype = np.dtype(dtype)
174
+ dtype2 = np.dtype(dtype2)
175
+ expected = get_result_type(dtype, dtype2)
176
+ result = concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype
177
+ assert result.kind == expected
178
+
179
+ def test_concat_empty_series_dtypes_triple(self):
180
+ assert (
181
+ concat(
182
+ [Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)]
183
+ ).dtype
184
+ == np.object_
185
+ )
186
+
187
+ def test_concat_empty_series_dtype_category_with_array(self):
188
+ # GH#18515
189
+ assert (
190
+ concat(
191
+ [Series(np.array([]), dtype="category"), Series(dtype="float64")]
192
+ ).dtype
193
+ == "float64"
194
+ )
195
+
196
+ def test_concat_empty_series_dtypes_sparse(self):
197
+ result = concat(
198
+ [
199
+ Series(dtype="float64").astype("Sparse"),
200
+ Series(dtype="float64").astype("Sparse"),
201
+ ]
202
+ )
203
+ assert result.dtype == "Sparse[float64]"
204
+
205
+ result = concat(
206
+ [Series(dtype="float64").astype("Sparse"), Series(dtype="float64")]
207
+ )
208
+ expected = pd.SparseDtype(np.float64)
209
+ assert result.dtype == expected
210
+
211
+ result = concat(
212
+ [Series(dtype="float64").astype("Sparse"), Series(dtype="object")]
213
+ )
214
+ expected = pd.SparseDtype("object")
215
+ assert result.dtype == expected
216
+
217
+ def test_concat_empty_df_object_dtype(self):
218
+ # GH 9149
219
+ df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
220
+ df_2 = DataFrame(columns=df_1.columns)
221
+ result = concat([df_1, df_2], axis=0)
222
+ expected = df_1.astype(object)
223
+ tm.assert_frame_equal(result, expected)
224
+
225
+ def test_concat_empty_dataframe_dtypes(self):
226
+ df = DataFrame(columns=list("abc"))
227
+ df["a"] = df["a"].astype(np.bool_)
228
+ df["b"] = df["b"].astype(np.int32)
229
+ df["c"] = df["c"].astype(np.float64)
230
+
231
+ result = concat([df, df])
232
+ assert result["a"].dtype == np.bool_
233
+ assert result["b"].dtype == np.int32
234
+ assert result["c"].dtype == np.float64
235
+
236
+ result = concat([df, df.astype(np.float64)])
237
+ assert result["a"].dtype == np.object_
238
+ assert result["b"].dtype == np.float64
239
+ assert result["c"].dtype == np.float64
240
+
241
+ def test_concat_inner_join_empty(self):
242
+ # GH 15328
243
+ df_empty = DataFrame()
244
+ df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
245
+ df_expected = DataFrame({"a": []}, index=RangeIndex(0), dtype="int64")
246
+
247
+ result = concat([df_a, df_empty], axis=1, join="inner")
248
+ tm.assert_frame_equal(result, df_expected)
249
+
250
+ result = concat([df_a, df_empty], axis=1, join="outer")
251
+ tm.assert_frame_equal(result, df_a)
252
+
253
+ def test_empty_dtype_coerce(self):
254
+ # xref to #12411
255
+ # xref to #12045
256
+ # xref to #11594
257
+ # see below
258
+
259
+ # 10571
260
+ df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
261
+ df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
262
+ result = concat([df1, df2])
263
+ expected = df1.dtypes
264
+ tm.assert_series_equal(result.dtypes, expected)
265
+
266
+ def test_concat_empty_dataframe(self):
267
+ # 39037
268
+ df1 = DataFrame(columns=["a", "b"])
269
+ df2 = DataFrame(columns=["b", "c"])
270
+ result = concat([df1, df2, df1])
271
+ expected = DataFrame(columns=["a", "b", "c"])
272
+ tm.assert_frame_equal(result, expected)
273
+
274
+ df3 = DataFrame(columns=["a", "b"])
275
+ df4 = DataFrame(columns=["b"])
276
+ result = concat([df3, df4])
277
+ expected = DataFrame(columns=["a", "b"])
278
+ tm.assert_frame_equal(result, expected)
279
+
280
+ def test_concat_empty_dataframe_different_dtypes(self, using_infer_string):
281
+ # 39037
282
+ df1 = DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
283
+ df2 = DataFrame({"a": [1, 2, 3]})
284
+
285
+ result = concat([df1[:0], df2[:0]])
286
+ assert result["a"].dtype == np.int64
287
+ assert result["b"].dtype == np.object_ if not using_infer_string else "string"
288
+
289
+ def test_concat_to_empty_ea(self):
290
+ """48510 `concat` to an empty EA should maintain type EA dtype."""
291
+ df_empty = DataFrame({"a": pd.array([], dtype=pd.Int64Dtype())})
292
+ df_new = DataFrame({"a": pd.array([1, 2, 3], dtype=pd.Int64Dtype())})
293
+ expected = df_new.copy()
294
+ result = concat([df_empty, df_new])
295
+ tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py ADDED
@@ -0,0 +1,472 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from copy import deepcopy
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas.errors import PerformanceWarning
7
+
8
+ import pandas as pd
9
+ from pandas import (
10
+ DataFrame,
11
+ Index,
12
+ MultiIndex,
13
+ Series,
14
+ concat,
15
+ )
16
+ import pandas._testing as tm
17
+
18
+
19
class TestIndexConcat:
    """Index-handling behavior of :func:`pandas.concat` with flat indexes.

    Covers ignore_index, index-name propagation, copy semantics of the
    result indexes, default RangeIndex creation, and duplicate labels.
    """

    def test_concat_ignore_index(self, sort):
        # ``ignore_index=True`` on axis=1 renumbers the *columns*; rows are
        # still aligned on the union of the row indexes (order set by sort).
        frame1 = DataFrame(
            {"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
        )
        frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
        frame1.index = Index(["x", "y", "z"])
        frame2.index = Index(["x", "y", "q"])

        v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)

        nan = np.nan
        expected = DataFrame(
            [
                [nan, nan, nan, 4.3],
                ["a", 1, 4.5, 5.2],
                ["b", 2, 3.2, 2.2],
                ["c", 3, 1.2, nan],
            ],
            index=Index(["q", "x", "y", "z"]),
        )
        if not sort:
            # Unsorted result keeps first-seen order: frame1's labels first.
            expected = expected.loc[["x", "y", "z", "q"]]

        tm.assert_frame_equal(v1, expected)

    @pytest.mark.parametrize(
        "name_in1,name_in2,name_in3,name_out",
        [
            ("idx", "idx", "idx", "idx"),
            ("idx", "idx", None, None),
            ("idx", None, None, None),
            ("idx1", "idx2", None, None),
            ("idx1", "idx1", "idx2", None),
            ("idx1", "idx2", "idx3", None),
            (None, None, None, None),
        ],
    )
    def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
        # GH13475: the result index name survives only if all inputs agree.
        indices = [
            Index(["a", "b", "c"], name=name_in1),
            Index(["b", "c", "d"], name=name_in2),
            Index(["c", "d", "e"], name=name_in3),
        ]
        frames = [
            DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
        ]
        result = concat(frames, axis=1)

        exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
        expected = DataFrame(
            {
                "x": [0, 1, 2, np.nan, np.nan],
                "y": [np.nan, 0, 1, 2, np.nan],
                "z": [np.nan, np.nan, 0, 1, 2],
            },
            index=exp_ind,
        )

        tm.assert_frame_equal(result, expected)

    def test_concat_rename_index(self):
        # ``names`` must label both the new keys level and the original
        # index level of the concatenated frames.
        a = DataFrame(
            np.random.default_rng(2).random((3, 3)),
            columns=list("ABC"),
            index=Index(list("abc"), name="index_a"),
        )
        b = DataFrame(
            np.random.default_rng(2).random((3, 3)),
            columns=list("ABC"),
            index=Index(list("abc"), name="index_b"),
        )

        result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])

        # Build the expectation by naming only the outer level, then
        # renaming the inner level explicitly.
        exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
        names = list(exp.index.names)
        names[1] = "lvl1"
        exp.index.set_names(names, inplace=True)

        tm.assert_frame_equal(result, exp)
        assert result.index.names == exp.index.names

    def test_concat_copy_index_series(self, axis, using_copy_on_write):
        # GH 29879: with copy=True the result index must be a new object,
        # except under CoW on axis=1 where reusing the index is safe.
        ser = Series([1, 2])
        comb = concat([ser, ser], axis=axis, copy=True)
        if not using_copy_on_write or axis in [0, "index"]:
            assert comb.index is not ser.index
        else:
            assert comb.index is ser.index

    def test_concat_copy_index_frame(self, axis, using_copy_on_write):
        # GH 29879: same as above for frames; only the non-concatenated
        # axis may be reused under CoW.
        df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
        comb = concat([df, df], axis=axis, copy=True)
        if not using_copy_on_write:
            assert not comb.index.is_(df.index)
            assert not comb.columns.is_(df.columns)
        elif axis in [0, "index"]:
            assert not comb.index.is_(df.index)
            assert comb.columns.is_(df.columns)
        elif axis in [1, "columns"]:
            assert comb.index.is_(df.index)
            assert not comb.columns.is_(df.columns)

    def test_default_index(self):
        # The default (unnamed) axis produced by concat must be a RangeIndex.

        # is_series and ignore_index
        s1 = Series([1, 2, 3], name="x")
        s2 = Series([4, 5, 6], name="y")
        res = concat([s1, s2], axis=1, ignore_index=True)
        assert isinstance(res.columns, pd.RangeIndex)
        exp = DataFrame([[1, 4], [2, 5], [3, 6]])
        # use check_index_type=True to check the result have
        # RangeIndex (default index)
        tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)

        # is_series and all inputs have no names
        s1 = Series([1, 2, 3])
        s2 = Series([4, 5, 6])
        res = concat([s1, s2], axis=1, ignore_index=False)
        assert isinstance(res.columns, pd.RangeIndex)
        exp = DataFrame([[1, 4], [2, 5], [3, 6]])
        exp.columns = pd.RangeIndex(2)
        tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)

        # is_dataframe and ignore_index
        df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
        df2 = DataFrame({"A": [3, 4], "B": [7, 8]})

        res = concat([df1, df2], axis=0, ignore_index=True)
        exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
        tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)

        res = concat([df1, df2], axis=1, ignore_index=True)
        exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
        tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)

    def test_dups_index(self):
        # GH 4771: duplicate column/index labels must round-trip through
        # concat and _append without reordering or loss.

        # single dtypes
        df = DataFrame(
            np.random.default_rng(2).integers(0, 10, size=40).reshape(10, 4),
            columns=["A", "A", "C", "C"],
        )

        result = concat([df, df], axis=1)
        tm.assert_frame_equal(result.iloc[:, :4], df)
        tm.assert_frame_equal(result.iloc[:, 4:], df)

        result = concat([df, df], axis=0)
        tm.assert_frame_equal(result.iloc[:10], df)
        tm.assert_frame_equal(result.iloc[10:], df)

        # multi dtypes
        df = concat(
            [
                DataFrame(
                    np.random.default_rng(2).standard_normal((10, 4)),
                    columns=["A", "A", "B", "B"],
                ),
                DataFrame(
                    np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2),
                    columns=["A", "C"],
                ),
            ],
            axis=1,
        )

        result = concat([df, df], axis=1)
        tm.assert_frame_equal(result.iloc[:, :6], df)
        tm.assert_frame_equal(result.iloc[:, 6:], df)

        result = concat([df, df], axis=0)
        tm.assert_frame_equal(result.iloc[:10], df)
        tm.assert_frame_equal(result.iloc[10:], df)

        # append
        result = df.iloc[0:8, :]._append(df.iloc[8:])
        tm.assert_frame_equal(result, df)

        result = df.iloc[0:8, :]._append(df.iloc[8:9])._append(df.iloc[9:10])
        tm.assert_frame_equal(result, df)

        expected = concat([df, df], axis=0)
        result = df._append(df)
        tm.assert_frame_equal(result, expected)
208
+
209
+
210
class TestMultiIndexConcat:
    """MultiIndex-related behavior of :func:`pandas.concat`.

    Covers keys/names handling, level validation, RangeIndex levels,
    dtype preservation of column indexes, and non-unique keys.
    """

    def test_concat_multiindex_with_keys(self, multiindex_dataframe_random_data):
        # ``keys`` adds one outer level named by ``names`` on top of the
        # frames' existing (2-level) MultiIndex.
        frame = multiindex_dataframe_random_data
        index = frame.index
        result = concat([frame, frame], keys=[0, 1], names=["iteration"])

        assert result.index.names == ("iteration",) + index.names
        tm.assert_frame_equal(result.loc[0], frame)
        tm.assert_frame_equal(result.loc[1], frame)
        assert result.index.nlevels == 3

    def test_concat_multiindex_with_none_in_index_names(self):
        # GH 15787: a None level name must survive the added keys level.
        index = MultiIndex.from_product([[1], range(5)], names=["level1", None])
        df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)

        result = concat([df, df], keys=[1, 2], names=["level2"])
        index = MultiIndex.from_product(
            [[1, 2], [1], range(5)], names=["level2", "level1", None]
        )
        expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
        tm.assert_frame_equal(result, expected)

        # Unequal lengths: expectation built from explicit tuples.
        result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
        level2 = [1] * 5 + [2] * 2
        level1 = [1] * 7
        no_name = list(range(5)) + list(range(2))
        tuples = list(zip(level2, level1, no_name))
        index = MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
        expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
        tm.assert_frame_equal(result, expected)

    def test_concat_multiindex_rangeindex(self):
        # GH13542
        # when multi-index levels are RangeIndex objects
        # there is a bug in concat with objects of len 1

        df = DataFrame(np.random.default_rng(2).standard_normal((9, 2)))
        df.index = MultiIndex(
            levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
            codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
        )

        res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
        exp = df.iloc[[2, 3, 4, 5], :]
        tm.assert_frame_equal(res, exp)

    def test_concat_multiindex_dfs_with_deepcopy(self):
        # GH 9967: concatenating a dict of MultiIndexed frames gives the
        # same result whether or not the inputs were deep-copied first.
        example_multiindex1 = MultiIndex.from_product([["a"], ["b"]])
        example_dataframe1 = DataFrame([0], index=example_multiindex1)

        example_multiindex2 = MultiIndex.from_product([["a"], ["c"]])
        example_dataframe2 = DataFrame([1], index=example_multiindex2)

        example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
        expected_index = MultiIndex(
            levels=[["s1", "s2"], ["a"], ["b", "c"]],
            codes=[[0, 1], [0, 0], [0, 1]],
            names=["testname", None, None],
        )
        expected = DataFrame([[0], [1]], index=expected_index)
        result_copy = concat(deepcopy(example_dict), names=["testname"])
        tm.assert_frame_equal(result_copy, expected)
        result_no_copy = concat(example_dict, names=["testname"])
        tm.assert_frame_equal(result_no_copy, expected)

    @pytest.mark.parametrize(
        "mi1_list",
        [
            [["a"], range(2)],
            [["b"], np.arange(2.0, 4.0)],
            [["c"], ["A", "B"]],
            [["d"], pd.date_range(start="2017", end="2018", periods=2)],
        ],
    )
    @pytest.mark.parametrize(
        "mi2_list",
        [
            [["a"], range(2)],
            [["b"], np.arange(2.0, 4.0)],
            [["c"], ["A", "B"]],
            [["d"], pd.date_range(start="2017", end="2018", periods=2)],
        ],
    )
    def test_concat_with_various_multiindex_dtypes(
        self, mi1_list: list, mi2_list: list
    ):
        # GitHub #23478: concatenating along MultiIndex columns with mixed
        # level dtypes must not warn and must union the levels correctly.
        mi1 = MultiIndex.from_product(mi1_list)
        mi2 = MultiIndex.from_product(mi2_list)

        df1 = DataFrame(np.zeros((1, len(mi1))), columns=mi1)
        df2 = DataFrame(np.zeros((1, len(mi2))), columns=mi2)

        if mi1_list[0] == mi2_list[0]:
            # Same outer label: the inner level is shared and repeated.
            expected_mi = MultiIndex(
                levels=[mi1_list[0], list(mi1_list[1])],
                codes=[[0, 0, 0, 0], [0, 1, 0, 1]],
            )
        else:
            # Distinct outer labels: both levels are concatenated.
            expected_mi = MultiIndex(
                levels=[
                    mi1_list[0] + mi2_list[0],
                    list(mi1_list[1]) + list(mi2_list[1]),
                ],
                codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
            )

        expected_df = DataFrame(np.zeros((1, len(expected_mi))), columns=expected_mi)

        with tm.assert_produces_warning(None):
            result_df = concat((df1, df2), axis=1)

        tm.assert_frame_equal(expected_df, result_df)

    def test_concat_multiindex_(self):
        # GitHub #44786: keys over a duplicate-label index must produce a
        # full product MultiIndex, keeping the duplicates.
        df = DataFrame({"col": ["a", "b", "c"]}, index=["1", "2", "2"])
        df = concat([df], keys=["X"])

        iterables = [["X"], ["1", "2", "2"]]
        result_index = df.index
        expected_index = MultiIndex.from_product(iterables)

        tm.assert_index_equal(result_index, expected_index)

        result_df = df
        expected_df = DataFrame(
            {"col": ["a", "b", "c"]}, index=MultiIndex.from_product(iterables)
        )
        tm.assert_frame_equal(result_df, expected_df)

    def test_concat_with_key_not_unique(self):
        # GitHub #46519: duplicate ``keys`` are allowed; .loc on the result
        # behaves like .loc on an equivalent hand-built duplicate index.
        df1 = DataFrame({"name": [1]})
        df2 = DataFrame({"name": [2]})
        df3 = DataFrame({"name": [3]})
        df_a = concat([df1, df2, df3], keys=["x", "y", "x"])
        # the warning is caused by indexing unsorted multi-index
        with tm.assert_produces_warning(
            PerformanceWarning, match="indexing past lexsort depth"
        ):
            out_a = df_a.loc[("x", 0), :]

        df_b = DataFrame(
            {"name": [1, 2, 3]}, index=Index([("x", 0), ("y", 0), ("x", 0)])
        )
        with tm.assert_produces_warning(
            PerformanceWarning, match="indexing past lexsort depth"
        ):
            out_b = df_b.loc[("x", 0)]

        tm.assert_frame_equal(out_a, out_b)

        df1 = DataFrame({"name": ["a", "a", "b"]})
        df2 = DataFrame({"name": ["a", "b"]})
        df3 = DataFrame({"name": ["c", "d"]})
        df_a = concat([df1, df2, df3], keys=["x", "y", "x"])
        with tm.assert_produces_warning(
            PerformanceWarning, match="indexing past lexsort depth"
        ):
            out_a = df_a.loc[("x", 0), :]

        df_b = DataFrame(
            {
                "a": ["x", "x", "x", "y", "y", "x", "x"],
                "b": [0, 1, 2, 0, 1, 0, 1],
                "name": list("aababcd"),
            }
        ).set_index(["a", "b"])
        df_b.index.names = [None, None]
        with tm.assert_produces_warning(
            PerformanceWarning, match="indexing past lexsort depth"
        ):
            out_b = df_b.loc[("x", 0), :]

        tm.assert_frame_equal(out_a, out_b)

    def test_concat_with_duplicated_levels(self):
        # keyword levels should be unique
        df1 = DataFrame({"A": [1]}, index=["x"])
        df2 = DataFrame({"A": [1]}, index=["y"])
        msg = r"Level values not unique: \['x', 'y', 'y'\]"
        with pytest.raises(ValueError, match=msg):
            concat([df1, df2], keys=["x", "y"], levels=[["x", "y", "y"]])

    @pytest.mark.parametrize("levels", [[["x", "y"]], [["x", "y", "y"]]])
    def test_concat_with_levels_with_none_keys(self, levels):
        # ``levels`` without ``keys`` is rejected regardless of uniqueness.
        df1 = DataFrame({"A": [1]}, index=["x"])
        df2 = DataFrame({"A": [1]}, index=["y"])
        msg = "levels supported only when keys is not None"
        with pytest.raises(ValueError, match=msg):
            concat([df1, df2], levels=levels)

    def test_concat_range_index_result(self):
        # GH#47501: aligned RangeIndexes must stay a RangeIndex.
        df1 = DataFrame({"a": [1, 2]})
        df2 = DataFrame({"b": [1, 2]})

        result = concat([df1, df2], sort=True, axis=1)
        expected = DataFrame({"a": [1, 2], "b": [1, 2]})
        tm.assert_frame_equal(result, expected)
        expected_index = pd.RangeIndex(0, 2)
        tm.assert_index_equal(result.index, expected_index, exact=True)

    def test_concat_index_keep_dtype(self):
        # GH#47329: the column Index dtype must be preserved on union.
        df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype="object"))
        df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="object"))
        result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
        expected = DataFrame(
            [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="object")
        )
        tm.assert_frame_equal(result, expected)

    def test_concat_index_keep_dtype_ea_numeric(self, any_numeric_ea_dtype):
        # GH#47329: same as above for masked extension (EA) column dtypes.
        df1 = DataFrame(
            [[0, 1, 1]], columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype)
        )
        df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=any_numeric_ea_dtype))
        result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
        expected = DataFrame(
            [[0, 1, 1.0], [0, 1, np.nan]],
            columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype),
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("dtype", ["Int8", "Int16", "Int32"])
    def test_concat_index_find_common(self, dtype):
        # GH#47329: differing EA dtypes resolve to their common dtype.
        df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype))
        df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="Int32"))
        result = concat([df1, df2], ignore_index=True, join="outer", sort=True)
        expected = DataFrame(
            [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="Int32")
        )
        tm.assert_frame_equal(result, expected)

    def test_concat_axis_1_sort_false_rangeindex(self, using_infer_string):
        # GH 46675: sort=False with default RangeIndexes keeps a RangeIndex
        # even when input lengths differ (including an empty input).
        s1 = Series(["a", "b", "c"])
        s2 = Series(["a", "b"])
        s3 = Series(["a", "b", "c", "d"])
        s4 = Series(
            [], dtype=object if not using_infer_string else "string[pyarrow_numpy]"
        )
        result = concat(
            [s1, s2, s3, s4], sort=False, join="outer", ignore_index=False, axis=1
        )
        expected = DataFrame(
            [
                ["a"] * 3 + [np.nan],
                ["b"] * 3 + [np.nan],
                ["c", np.nan] * 2,
                [np.nan] * 2 + ["d"] + [np.nan],
            ],
            dtype=object if not using_infer_string else "string[pyarrow_numpy]",
        )
        tm.assert_frame_equal(
            result, expected, check_index_type=True, check_column_type=True
        )
+ )
env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from io import StringIO
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas import (
7
+ DataFrame,
8
+ concat,
9
+ read_csv,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+
14
class TestInvalidConcat:
    """``concat`` must reject non-pandas objects but accept any iterable
    of pandas objects (generators, chunked readers, ...)."""

    @pytest.mark.parametrize("obj", [1, {}, [1, 2], (1, 2)])
    def test_concat_invalid(self, obj):
        # trying to concat a ndframe with a non-ndframe
        df1 = DataFrame(range(2))
        msg = (
            f"cannot concatenate object of type '{type(obj)}'; "
            "only Series and DataFrame objs are valid"
        )
        with pytest.raises(TypeError, match=msg):
            concat([df1, obj])

    def test_concat_invalid_first_argument(self):
        # A bare DataFrame (not wrapped in an iterable) is rejected up front.
        df1 = DataFrame(range(2))
        msg = (
            "first argument must be an iterable of pandas "
            'objects, you passed an object of type "DataFrame"'
        )
        with pytest.raises(TypeError, match=msg):
            concat(df1)

    def test_concat_generator_obj(self):
        # generator ok though
        concat(DataFrame(np.random.default_rng(2).random((5, 5))) for _ in range(3))

    def test_concat_textreader_obj(self):
        # text reader ok
        # GH6583
        # NOTE: the CSV lines below are flush-left on purpose — they are part
        # of the string literal fed to read_csv, not code indentation.
        data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""

        with read_csv(StringIO(data), chunksize=1) as reader:
            result = concat(reader, ignore_index=True)
        expected = read_csv(StringIO(data))
        tm.assert_frame_equal(result, expected)