diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf9fe0fed2c1beebf6357c190d850661848b2849
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_fillna.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_fillna.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e98d3a3c21dc3327db77ad6814d408019aae5502
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_fillna.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_insert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_insert.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b80792cbd46c9a8f8fbd73436ca8920c14514905
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_insert.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_repeat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_repeat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eee1a0bcfde7597f271be8f453e68a1577ada3b7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_repeat.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_shift.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_shift.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9d21a70fe7ca9d96038f052e9a2211960b45895c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/__pycache__/test_shift.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_astype.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_astype.py
new file mode 100644
index 0000000000000000000000000000000000000000..311f2b5c9aa598de68543825d1bde9dc814b469e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_astype.py
@@ -0,0 +1,176 @@
+from datetime import timedelta
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    Index,
+    NaT,
+    Timedelta,
+    TimedeltaIndex,
+    timedelta_range,
+)
+import pandas._testing as tm
+from pandas.core.arrays import TimedeltaArray
+
+
+class TestTimedeltaIndex:
+    def test_astype_object(self):
+        idx = timedelta_range(start="1 days", periods=4, freq="D", name="idx")
+        expected_list = [
+            Timedelta("1 days"),
+            Timedelta("2 days"),
+            Timedelta("3 days"),
+            Timedelta("4 days"),
+        ]
+        result = idx.astype(object)
+        expected = Index(expected_list, dtype=object, name="idx")
+        tm.assert_index_equal(result, expected)
+        assert idx.tolist() == expected_list
+
+    def test_astype_object_with_nat(self):
+        idx = TimedeltaIndex(
+            [timedelta(days=1), timedelta(days=2), NaT, timedelta(days=4)], name="idx"
+        )
+        expected_list = [
+            Timedelta("1 days"),
+            Timedelta("2 days"),
+            NaT,
+            Timedelta("4 days"),
+        ]
+        result = idx.astype(object)
+        expected = Index(expected_list, dtype=object, name="idx")
+        tm.assert_index_equal(result, expected)
+        assert idx.tolist() == expected_list
+
+    def test_astype(self):
+        # GH 13149, GH 13209
+        idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan], name="idx")
+
+        result = idx.astype(object)
+        expected = Index(
+            [Timedelta("1 days 03:46:40")] + [NaT] * 3, dtype=object, name="idx"
+        )
+        tm.assert_index_equal(result, expected)
+
+        result = idx.astype(np.int64)
+        expected = Index(
+            [100000000000000] + [-9223372036854775808] * 3, dtype=np.int64, name="idx"
+        )
+        tm.assert_index_equal(result, expected)
+
+        result = idx.astype(str)
+        expected = Index([str(x) for x in idx], name="idx", dtype=object)
+        tm.assert_index_equal(result, expected)
+
+        rng = timedelta_range("1 days", periods=10)
+        result = rng.astype("i8")
+        tm.assert_index_equal(result, Index(rng.asi8))
+        tm.assert_numpy_array_equal(rng.asi8, result.values)
+
+    def test_astype_uint(self):
+        arr = timedelta_range("1h", periods=2)
+
+        with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
+            arr.astype("uint64")
+        with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
+            arr.astype("uint32")
+
+    def test_astype_timedelta64(self):
+        # GH 13149, GH 13209
+        idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan])
+
+        msg = (
+            r"Cannot convert from timedelta64\[ns\] to timedelta64. "
+            "Supported resolutions are 's', 'ms', 'us', 'ns'"
+        )
+        with pytest.raises(ValueError, match=msg):
+            idx.astype("timedelta64")
+
+        result = idx.astype("timedelta64[ns]")
+        tm.assert_index_equal(result, idx)
+        assert result is not idx
+
+        result = idx.astype("timedelta64[ns]", copy=False)
+        tm.assert_index_equal(result, idx)
+        assert result is idx
+
+    def test_astype_to_td64d_raises(self, index_or_series):
+        # We don't support "D" reso
+        scalar = Timedelta(days=31)
+        td = index_or_series(
+            [scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT],
+            dtype="m8[ns]",
+        )
+        msg = (
+            r"Cannot convert from timedelta64\[ns\] to timedelta64\[D\]. "
+            "Supported resolutions are 's', 'ms', 'us', 'ns'"
+        )
+        with pytest.raises(ValueError, match=msg):
+            td.astype("timedelta64[D]")
+
+    def test_astype_ms_to_s(self, index_or_series):
+        scalar = Timedelta(days=31)
+        td = index_or_series(
+            [scalar, scalar, scalar + timedelta(minutes=5, seconds=3), NaT],
+            dtype="m8[ns]",
+        )
+
+        exp_values = np.asarray(td).astype("m8[s]")
+        exp_tda = TimedeltaArray._simple_new(exp_values, dtype=exp_values.dtype)
+        expected = index_or_series(exp_tda)
+        assert expected.dtype == "m8[s]"
+        result = td.astype("timedelta64[s]")
+        tm.assert_equal(result, expected)
+
+    def test_astype_freq_conversion(self):
+        # pre-2.0 td64 astype converted to float64. now for supported units
+        # (s, ms, us, ns) this converts to the requested dtype.
+        # This matches TDA and Series
+        tdi = timedelta_range("1 Day", periods=30)
+
+        res = tdi.astype("m8[s]")
+        exp_values = np.asarray(tdi).astype("m8[s]")
+        exp_tda = TimedeltaArray._simple_new(
+            exp_values, dtype=exp_values.dtype, freq=tdi.freq
+        )
+        expected = Index(exp_tda)
+        assert expected.dtype == "m8[s]"
+        tm.assert_index_equal(res, expected)
+
+        # check this matches Series and TimedeltaArray
+        res = tdi._data.astype("m8[s]")
+        tm.assert_equal(res, expected._values)
+
+        res = tdi.to_series().astype("m8[s]")
+        tm.assert_equal(res._values, expected._values._with_freq(None))
+
+    @pytest.mark.parametrize("dtype", [float, "datetime64", "datetime64[ns]"])
+    def test_astype_raises(self, dtype):
+        # GH 13149, GH 13209
+        idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan])
+        msg = "Cannot cast TimedeltaIndex to dtype"
+        with pytest.raises(TypeError, match=msg):
+            idx.astype(dtype)
+
+    def test_astype_category(self):
+        obj = timedelta_range("1h", periods=2, freq="h")
+
+        result = obj.astype("category")
+        expected = pd.CategoricalIndex([Timedelta("1h"), Timedelta("2h")])
+        tm.assert_index_equal(result, expected)
+
+        result = obj._data.astype("category")
+        expected = expected.values
+        tm.assert_categorical_equal(result, expected)
+
+    def test_astype_array_fallback(self):
+        obj = timedelta_range("1h", periods=2)
+        result = obj.astype(bool)
+        expected = Index(np.array([True, True]))
+        tm.assert_index_equal(result, expected)
+
+        result = obj._data.astype(bool)
+        expected = np.array([True, True])
+        tm.assert_numpy_array_equal(result, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_insert.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_insert.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8164102815f61ec61962524db2a2b3dd0ff6d55
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_insert.py
@@ -0,0 +1,145 @@
+from datetime import timedelta
+
+import numpy as np
+import pytest
+
+from pandas._libs import lib
+
+import pandas as pd
+from pandas import (
+    Index,
+    Timedelta,
+    TimedeltaIndex,
+    timedelta_range,
+)
+import pandas._testing as tm
+
+
+class TestTimedeltaIndexInsert:
+    def test_insert(self):
+        idx = TimedeltaIndex(["4day", "1day", "2day"], name="idx")
+
+        result = idx.insert(2, timedelta(days=5))
+        exp = TimedeltaIndex(["4day", "1day", "5day", "2day"], name="idx")
+        tm.assert_index_equal(result, exp)
+
+        # insertion of non-datetime should coerce to object index
+        result = idx.insert(1, "inserted")
+        expected = Index(
+            [Timedelta("4day"), "inserted", Timedelta("1day"), Timedelta("2day")],
+            name="idx",
+        )
+        assert not isinstance(result, TimedeltaIndex)
+        tm.assert_index_equal(result, expected)
+        assert result.name == expected.name
+
+        idx = timedelta_range("1day 00:00:01", periods=3, freq="s", name="idx")
+
+        # preserve freq
+        expected_0 = TimedeltaIndex(
+            ["1day", "1day 00:00:01", "1day 00:00:02", "1day 00:00:03"],
+            name="idx",
+            freq="s",
+        )
+        expected_3 = TimedeltaIndex(
+            ["1day 00:00:01", "1day 00:00:02", "1day 00:00:03", "1day 00:00:04"],
+            name="idx",
+            freq="s",
+        )
+
+        # reset freq to None
+        expected_1_nofreq = TimedeltaIndex(
+            ["1day 00:00:01", "1day 00:00:01", "1day 00:00:02", "1day 00:00:03"],
+            name="idx",
+            freq=None,
+        )
+        expected_3_nofreq = TimedeltaIndex(
+            ["1day 00:00:01", "1day 00:00:02", "1day 00:00:03", "1day 00:00:05"],
+            name="idx",
+            freq=None,
+        )
+
+        cases = [
+            (0, Timedelta("1day"), expected_0),
+            (-3, Timedelta("1day"), expected_0),
+            (3, Timedelta("1day 00:00:04"), expected_3),
+            (1, Timedelta("1day 00:00:01"), expected_1_nofreq),
+            (3, Timedelta("1day 00:00:05"), expected_3_nofreq),
+        ]
+
+        for n, d, expected in cases:
+            result = idx.insert(n, d)
+            tm.assert_index_equal(result, expected)
+            assert result.name == expected.name
+            assert result.freq == expected.freq
+
+    @pytest.mark.parametrize(
+        "null", [None, np.nan, np.timedelta64("NaT"), pd.NaT, pd.NA]
+    )
+    def test_insert_nat(self, null):
+        # GH 18295 (test missing)
+        idx = timedelta_range("1day", "3day")
+        result = idx.insert(1, null)
+        expected = TimedeltaIndex(["1day", pd.NaT, "2day", "3day"])
+        tm.assert_index_equal(result, expected)
+
+    def test_insert_invalid_na(self):
+        idx = TimedeltaIndex(["4day", "1day", "2day"], name="idx")
+
+        item = np.datetime64("NaT")
+        result = idx.insert(0, item)
+
+        expected = Index([item] + list(idx), dtype=object, name="idx")
+        tm.assert_index_equal(result, expected)
+
+        # Also works if we pass a different dt64nat object
+        item2 = np.datetime64("NaT")
+        result = idx.insert(0, item2)
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "item", [0, np.int64(0), np.float64(0), np.array(0), np.datetime64(456, "us")]
+    )
+    def test_insert_mismatched_types_raises(self, item):
+        # GH#33703 dont cast these to td64
+        tdi = TimedeltaIndex(["4day", "1day", "2day"], name="idx")
+
+        result = tdi.insert(1, item)
+
+        expected = Index(
+            [tdi[0], lib.item_from_zerodim(item)] + list(tdi[1:]),
+            dtype=object,
+            name="idx",
+        )
+        tm.assert_index_equal(result, expected)
+
+    def test_insert_castable_str(self):
+        idx = timedelta_range("1day", "3day")
+
+        result = idx.insert(0, "1 Day")
+
+        expected = TimedeltaIndex([idx[0]] + list(idx))
+        tm.assert_index_equal(result, expected)
+
+    def test_insert_non_castable_str(self):
+        idx = timedelta_range("1day", "3day")
+
+        result = idx.insert(0, "foo")
+
+        expected = Index(["foo"] + list(idx), dtype=object)
+        tm.assert_index_equal(result, expected)
+
+    def test_insert_empty(self):
+        # Corner case inserting with length zero doesn't raise IndexError
+        # GH#33573 for freq preservation
+        idx = timedelta_range("1 Day", periods=3)
+        td = idx[0]
+
+        result = idx[:0].insert(0, td)
+        assert result.freq == "D"
+
+        with pytest.raises(IndexError, match="loc must be an integer between"):
+            result = idx[:0].insert(1, td)
+
+        with pytest.raises(IndexError, match="loc must be an integer between"):
+            result = idx[:0].insert(-1, td)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_repeat.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_repeat.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a9b58d1bf322938e9344d0cbacfaa79674fcf0e
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_repeat.py
@@ -0,0 +1,34 @@
+import numpy as np
+
+from pandas import (
+    TimedeltaIndex,
+    timedelta_range,
+)
+import pandas._testing as tm
+
+
+class TestRepeat:
+    def test_repeat(self):
+        index = timedelta_range("1 days", periods=2, freq="D")
+        exp = TimedeltaIndex(["1 days", "1 days", "2 days", "2 days"])
+        for res in [index.repeat(2), np.repeat(index, 2)]:
+            tm.assert_index_equal(res, exp)
+            assert res.freq is None
+
+        index = TimedeltaIndex(["1 days", "NaT", "3 days"])
+        exp = TimedeltaIndex(
+            [
+                "1 days",
+                "1 days",
+                "1 days",
+                "NaT",
+                "NaT",
+                "NaT",
+                "3 days",
+                "3 days",
+                "3 days",
+            ]
+        )
+        for res in [index.repeat(3), np.repeat(index, 3)]:
+            tm.assert_index_equal(res, exp)
+            assert res.freq is None
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0986d1496881a2061ac8306d0a064f4393cd4e9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/methods/test_shift.py
@@ -0,0 +1,76 @@
+import pytest
+
+from pandas.errors import NullFrequencyError
+
+import pandas as pd
+from pandas import TimedeltaIndex
+import pandas._testing as tm
+
+
+class TestTimedeltaIndexShift:
+    # -------------------------------------------------------------
+    # TimedeltaIndex.shift is used by __add__/__sub__
+
+    def test_tdi_shift_empty(self):
+        # GH#9903
+        idx = TimedeltaIndex([], name="xxx")
+        tm.assert_index_equal(idx.shift(0, freq="h"), idx)
+        tm.assert_index_equal(idx.shift(3, freq="h"), idx)
+
+    def test_tdi_shift_hours(self):
+        # GH#9903
+        idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
+        tm.assert_index_equal(idx.shift(0, freq="h"), idx)
+        exp = TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx")
+        tm.assert_index_equal(idx.shift(3, freq="h"), exp)
+        exp = TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx")
+        tm.assert_index_equal(idx.shift(-3, freq="h"), exp)
+
+    def test_tdi_shift_minutes(self):
+        # GH#9903
+        idx = TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
+        tm.assert_index_equal(idx.shift(0, freq="min"), idx)
+        exp = TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx")
+        tm.assert_index_equal(idx.shift(3, freq="min"), exp)
+        exp = TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx")
+        tm.assert_index_equal(idx.shift(-3, freq="min"), exp)
+
+    def test_tdi_shift_int(self):
+        # GH#8083
+        tdi = pd.to_timedelta(range(5), unit="d")
+        trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
+        result = trange.shift(1)
+        expected = TimedeltaIndex(
+            [
+                "1 days 01:00:00",
+                "2 days 01:00:00",
+                "3 days 01:00:00",
+                "4 days 01:00:00",
+                "5 days 01:00:00",
+            ],
+            freq="D",
+        )
+        tm.assert_index_equal(result, expected)
+
+    def test_tdi_shift_nonstandard_freq(self):
+        # GH#8083
+        tdi = pd.to_timedelta(range(5), unit="d")
+        trange = tdi._with_freq("infer") + pd.offsets.Hour(1)
+        result = trange.shift(3, freq="2D 1s")
+        expected = TimedeltaIndex(
+            [
+                "6 days 01:00:03",
+                "7 days 01:00:03",
+                "8 days 01:00:03",
+                "9 days 01:00:03",
+                "10 days 01:00:03",
+            ],
+            freq="D",
+        )
+        tm.assert_index_equal(result, expected)
+
+    def test_shift_no_freq(self):
+        # GH#19147
+        tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
+        with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
+            tdi.shift(2)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_ops.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6013baf86edcd566a17cd3127467a7443ac475a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_ops.py
@@ -0,0 +1,14 @@
+from pandas import (
+    TimedeltaIndex,
+    timedelta_range,
+)
+import pandas._testing as tm
+
+
+class TestTimedeltaIndexOps:
+    def test_infer_freq(self, freq_sample):
+        # GH#11018
+        idx = timedelta_range("1", freq=freq_sample, periods=10)
+        result = TimedeltaIndex(idx.asi8, freq="infer")
+        tm.assert_index_equal(idx, result)
+        assert result.freq == freq_sample
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_searchsorted.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_searchsorted.py
new file mode 100644
index 0000000000000000000000000000000000000000..710571ef383970097985f44e09ecba77fcf63f74
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_searchsorted.py
@@ -0,0 +1,28 @@
+import numpy as np
+import pytest
+
+from pandas import (
+    TimedeltaIndex,
+    Timestamp,
+)
+import pandas._testing as tm
+
+
+class TestSearchSorted:
+    def test_searchsorted_different_argument_classes(self, listlike_box):
+        idx = TimedeltaIndex(["1 day", "2 days", "3 days"])
+        result = idx.searchsorted(listlike_box(idx))
+        expected = np.arange(len(idx), dtype=result.dtype)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = idx._data.searchsorted(listlike_box(idx))
+        tm.assert_numpy_array_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2]
+    )
+    def test_searchsorted_invalid_argument_dtype(self, arg):
+        idx = TimedeltaIndex(["1 day", "2 days", "3 days"])
+        msg = "value should be a 'Timedelta', 'NaT', or array of those. Got"
+        with pytest.raises(TypeError, match=msg):
+            idx.searchsorted(arg)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_setops.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_setops.py
new file mode 100644
index 0000000000000000000000000000000000000000..fce10d9176d7438e63a5e46eede1bb96b41bb8bd
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_setops.py
@@ -0,0 +1,254 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    Index,
+    TimedeltaIndex,
+    timedelta_range,
+)
+import pandas._testing as tm
+
+from pandas.tseries.offsets import Hour
+
+
+class TestTimedeltaIndex:
+    def test_union(self):
+        i1 = timedelta_range("1day", periods=5)
+        i2 = timedelta_range("3day", periods=5)
+        result = i1.union(i2)
+        expected = timedelta_range("1day", periods=7)
+        tm.assert_index_equal(result, expected)
+
+        i1 = Index(np.arange(0, 20, 2, dtype=np.int64))
+        i2 = timedelta_range(start="1 day", periods=10, freq="D")
+        i1.union(i2)  # Works
+        i2.union(i1)  # Fails with "AttributeError: can't set attribute"
+
+    def test_union_sort_false(self):
+        tdi = timedelta_range("1day", periods=5)
+
+        left = tdi[3:]
+        right = tdi[:3]
+
+        # Check that we are testing the desired code path
+        assert left._can_fast_union(right)
+
+        result = left.union(right)
+        tm.assert_index_equal(result, tdi)
+
+        result = left.union(right, sort=False)
+        expected = TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"])
+        tm.assert_index_equal(result, expected)
+
+    def test_union_coverage(self):
+        idx = TimedeltaIndex(["3d", "1d", "2d"])
+        ordered = TimedeltaIndex(idx.sort_values(), freq="infer")
+        result = ordered.union(idx)
+        tm.assert_index_equal(result, ordered)
+
+        result = ordered[:0].union(ordered)
+        tm.assert_index_equal(result, ordered)
+        assert result.freq == ordered.freq
+
+    def test_union_bug_1730(self):
+        rng_a = timedelta_range("1 day", periods=4, freq="3h")
+        rng_b = timedelta_range("1 day", periods=4, freq="4h")
+
+        result = rng_a.union(rng_b)
+        exp = TimedeltaIndex(sorted(set(rng_a) | set(rng_b)))
+        tm.assert_index_equal(result, exp)
+
+    def test_union_bug_1745(self):
+        left = TimedeltaIndex(["1 day 15:19:49.695000"])
+        right = TimedeltaIndex(
+            ["2 day 13:04:21.322000", "1 day 15:27:24.873000", "1 day 15:31:05.350000"]
+        )
+
+        result = left.union(right)
+        exp = TimedeltaIndex(sorted(set(left) | set(right)))
+        tm.assert_index_equal(result, exp)
+
+    def test_union_bug_4564(self):
+        left = timedelta_range("1 day", "30d")
+        right = left + pd.offsets.Minute(15)
+
+        result = left.union(right)
+        exp = TimedeltaIndex(sorted(set(left) | set(right)))
+        tm.assert_index_equal(result, exp)
+
+    def test_union_freq_infer(self):
+        # When taking the union of two TimedeltaIndexes, we infer
+        # a freq even if the arguments don't have freq. This matches
+        # DatetimeIndex behavior.
+        tdi = timedelta_range("1 Day", periods=5)
+        left = tdi[[0, 1, 3, 4]]
+        right = tdi[[2, 3, 1]]
+
+        assert left.freq is None
+        assert right.freq is None
+
+        result = left.union(right)
+        tm.assert_index_equal(result, tdi)
+        assert result.freq == "D"
+
+    def test_intersection_bug_1708(self):
+        index_1 = timedelta_range("1 day", periods=4, freq="h")
+        index_2 = index_1 + pd.offsets.Hour(5)
+
+        result = index_1.intersection(index_2)
+        assert len(result) == 0
+
+        index_1 = timedelta_range("1 day", periods=4, freq="h")
+        index_2 = index_1 + pd.offsets.Hour(1)
+
+        result = index_1.intersection(index_2)
+        expected = timedelta_range("1 day 01:00:00", periods=3, freq="h")
+        tm.assert_index_equal(result, expected)
+        assert result.freq == expected.freq
+
+    def test_intersection_equal(self, sort):
+        # GH 24471 Test intersection outcome given the sort keyword
+        # for equal indices intersection should return the original index
+        first = timedelta_range("1 day", periods=4, freq="h")
+        second = timedelta_range("1 day", periods=4, freq="h")
+        intersect = first.intersection(second, sort=sort)
+        if sort is None:
+            tm.assert_index_equal(intersect, second.sort_values())
+        tm.assert_index_equal(intersect, second)
+
+        # Corner cases
+        inter = first.intersection(first, sort=sort)
+        assert inter is first
+
+    @pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)])
+    def test_intersection_zero_length(self, period_1, period_2, sort):
+        # GH 24471 test for non overlap the intersection should be zero length
+        index_1 = timedelta_range("1 day", periods=period_1, freq="h")
+        index_2 = timedelta_range("1 day", periods=period_2, freq="h")
+        expected = timedelta_range("1 day", periods=0, freq="h")
+        result = index_1.intersection(index_2, sort=sort)
+        tm.assert_index_equal(result, expected)
+
+    def test_zero_length_input_index(self, sort):
+        # GH 24966 test for 0-len intersections are copied
+        index_1 = timedelta_range("1 day", periods=0, freq="h")
+        index_2 = timedelta_range("1 day", periods=3, freq="h")
+        result = index_1.intersection(index_2, sort=sort)
+        assert index_1 is not result
+        assert index_2 is not result
+        tm.assert_copy(result, index_1)
+
+    @pytest.mark.parametrize(
+        "rng, expected",
+        # if target has the same name, it is preserved
+        [
+            (
+                timedelta_range("1 day", periods=5, freq="h", name="idx"),
+                timedelta_range("1 day", periods=4, freq="h", name="idx"),
+            ),
+            # if target name is different, it will be reset
+            (
+                timedelta_range("1 day", periods=5, freq="h", name="other"),
+                timedelta_range("1 day", periods=4, freq="h", name=None),
+            ),
+            # if no overlap exists return empty index
+            (
+                timedelta_range("1 day", periods=10, freq="h", name="idx")[5:],
+                TimedeltaIndex([], freq="h", name="idx"),
+            ),
+        ],
+    )
+    def test_intersection(self, rng, expected, sort):
+        # GH 4690 (with tz)
+        base = timedelta_range("1 day", periods=4, freq="h", name="idx")
+        result = base.intersection(rng, sort=sort)
+        if sort is None:
+            expected = expected.sort_values()
+        tm.assert_index_equal(result, expected)
+        assert result.name == expected.name
+        assert result.freq == expected.freq
+
+    @pytest.mark.parametrize(
+        "rng, expected",
+        # part intersection works
+        [
+            (
+                TimedeltaIndex(["5 hour", "2 hour", "4 hour", "9 hour"], name="idx"),
+                TimedeltaIndex(["2 hour", "4 hour"], name="idx"),
+            ),
+            # reordered part intersection
+            (
+                TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"),
+                TimedeltaIndex(["1 hour", "2 hour"], name=None),
+            ),
+            # reversed index
+            (
+                TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")[
+                    ::-1
+                ],
+                TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx"),
+            ),
+        ],
+    )
+    def test_intersection_non_monotonic(self, rng, expected, sort):
+        # 24471 non-monotonic
+        base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")
+        result = base.intersection(rng, sort=sort)
+        if sort is None:
+            expected = expected.sort_values()
+        tm.assert_index_equal(result, expected)
+        assert result.name == expected.name
+
+        # if reversed order, frequency is still the same
+        if all(base == rng[::-1]) and sort is None:
+            assert isinstance(result.freq, Hour)
+        else:
+            assert result.freq is None
+
+
+class TestTimedeltaIndexDifference:
+    def test_difference_freq(self, sort):
+        # GH14323: Difference of TimedeltaIndex should not preserve frequency
+
+        index = timedelta_range("0 days", "5 days", freq="D")
+
+        other = timedelta_range("1 days", "4 days", freq="D")
+        expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
+        idx_diff = index.difference(other, sort)
+        tm.assert_index_equal(idx_diff, expected)
+        tm.assert_attr_equal("freq", idx_diff, expected)
+
+        # preserve frequency when the difference is a contiguous
+        # subset of the original range
+        other = timedelta_range("2 days", "5 days", freq="D")
+        idx_diff = index.difference(other, sort)
+        expected = TimedeltaIndex(["0 days", "1 days"], freq="D")
+        tm.assert_index_equal(idx_diff, expected)
+        tm.assert_attr_equal("freq", idx_diff, expected)
+
+    def test_difference_sort(self, sort):
+        index = TimedeltaIndex(
+            ["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"]
+        )
+
+        other = timedelta_range("1 days", "4 days", freq="D")
+        idx_diff = index.difference(other, sort)
+
+        expected = TimedeltaIndex(["5 days", "0 days"], freq=None)
+
+        if sort is None:
+            expected = expected.sort_values()
+
+        tm.assert_index_equal(idx_diff, expected)
+        tm.assert_attr_equal("freq", idx_diff, expected)
+
+        other = timedelta_range("2 days", "5 days", freq="D")
+        idx_diff = index.difference(other, sort)
+        expected = TimedeltaIndex(["1 days", "0 days"], freq=None)
+
+        if sort is None:
+            expected = expected.sort_values()
+
+        tm.assert_index_equal(idx_diff, expected)
+        tm.assert_attr_equal("freq", idx_diff, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py
new file mode 100644
index 0000000000000000000000000000000000000000..f22bdb7a90516a7162ebdb1cc2d8cbfd9531b9e7
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/timedeltas/test_timedelta_range.py
@@ -0,0 +1,173 @@
+import numpy as np
+import pytest
+
+from pandas import (
+    Timedelta,
+    TimedeltaIndex,
+    timedelta_range,
+    to_timedelta,
+)
+import pandas._testing as tm
+
+from pandas.tseries.offsets import (
+    Day,
+    Second,
+)
+
+
+class TestTimedeltas:
+    def test_timedelta_range_unit(self):
+        # GH#49824
+        tdi = timedelta_range("0 Days", periods=10, freq="100000D", unit="s")
+        exp_arr = (np.arange(10, dtype="i8") * 100_000).view("m8[D]").astype("m8[s]")
+        tm.assert_numpy_array_equal(tdi.to_numpy(), exp_arr)
+
+    def test_timedelta_range(self):
+        expected = to_timedelta(np.arange(5), unit="D")
+        result = timedelta_range("0 days", periods=5, freq="D")
+        tm.assert_index_equal(result, expected)
+
+        expected = to_timedelta(np.arange(11), unit="D")
+        result = timedelta_range("0 days", "10 days", freq="D")
+        tm.assert_index_equal(result, expected)
+
+        expected = to_timedelta(np.arange(5), unit="D") + Second(2) + Day()
+        result = timedelta_range("1 days, 00:00:02", "5 days, 00:00:02", freq="D")
+        tm.assert_index_equal(result, expected)
+
+        expected = to_timedelta([1, 3, 5, 7, 9], unit="D") + Second(2)
+        result = timedelta_range("1 days, 00:00:02", periods=5, freq="2D")
+        tm.assert_index_equal(result, expected)
+
+        expected = to_timedelta(np.arange(50), unit="min") * 30
+        result = timedelta_range("0 days", freq="30min", periods=50)
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "depr_unit, unit",
+        [
+            ("H", "hour"),
+            ("T", "minute"),
+            ("t", "minute"),
+            ("S", "second"),
+            ("L", "millisecond"),
+            ("l", "millisecond"),
+            ("U", "microsecond"),
+            ("u", "microsecond"),
+            ("N", "nanosecond"),
+            ("n", "nanosecond"),
+        ],
+    )
+    def test_timedelta_units_H_T_S_L_U_N_deprecated(self, depr_unit, unit):
+        # GH#52536
+        depr_msg = (
+            f"'{depr_unit}' is deprecated and will be removed in a future version."
+        )
+
+        expected = to_timedelta(np.arange(5), unit=unit)
+        with tm.assert_produces_warning(FutureWarning, match=depr_msg):
+            result = to_timedelta(np.arange(5), unit=depr_unit)
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "periods, freq", [(3, "2D"), (5, "D"), (6, "19h12min"), (7, "16h"), (9, "12h")]
+    )
+    def test_linspace_behavior(self, periods, freq):
+        # GH 20976
+        result = timedelta_range(start="0 days", end="4 days", periods=periods)
+        expected = timedelta_range(start="0 days", end="4 days", freq=freq)
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize("msg_freq, freq", [("H", "19H12min"), ("T", "19h12T")])
+    def test_timedelta_range_H_T_deprecated(self, freq, msg_freq):
+        # GH#52536
+        msg = f"'{msg_freq}' is deprecated and will be removed in a future version."
+
+        result = timedelta_range(start="0 days", end="4 days", periods=6)
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            expected = timedelta_range(start="0 days", end="4 days", freq=freq)
+        tm.assert_index_equal(result, expected)
+
+    def test_errors(self):
+        # not enough params
+        msg = (
+            "Of the four parameters: start, end, periods, and freq, "
+            "exactly three must be specified"
+        )
+        with pytest.raises(ValueError, match=msg):
+            timedelta_range(start="0 days")
+
+        with pytest.raises(ValueError, match=msg):
+            timedelta_range(end="5 days")
+
+        with pytest.raises(ValueError, match=msg):
+            timedelta_range(periods=2)
+
+        with pytest.raises(ValueError, match=msg):
+            timedelta_range()
+
+        # too many params
+        with pytest.raises(ValueError, match=msg):
+            timedelta_range(start="0 days", end="5 days", periods=10, freq="h")
+
+    @pytest.mark.parametrize(
+        "start, end, freq, expected_periods",
+        [
+            ("1D", "10D", "2D", (10 - 1) // 2 + 1),
+            ("2D", "30D", "3D", (30 - 2) // 3 + 1),
+            ("2s", "50s", "5s", (50 - 2) // 5 + 1),
+            # tests that worked before GH 33498:
+            ("4D", "16D", "3D", (16 - 4) // 3 + 1),
+            ("8D", "16D", "40s", (16 * 3600 * 24 - 8 * 3600 * 24) // 40 + 1),
+        ],
+    )
+    def test_timedelta_range_freq_divide_end(self, start, end, freq, expected_periods):
+        # GH 33498 only the cases where `(end % freq) == 0` used to fail
+        res = timedelta_range(start=start, end=end, freq=freq)
+        assert Timedelta(start) == res[0]
+        assert Timedelta(end) >= res[-1]
+        assert len(res) == expected_periods
+
+    def test_timedelta_range_infer_freq(self):
+        # https://github.com/pandas-dev/pandas/issues/35897
+        result = timedelta_range("0s", "1s", periods=31)
+        assert result.freq is None
+
+    @pytest.mark.parametrize(
+        "freq_depr, start, end, expected_values, expected_freq",
+        [
+            (
+                "3.5S",
+                "05:03:01",
+                "05:03:10",
+                ["0 days 05:03:01", "0 days 05:03:04.500000", "0 days 05:03:08"],
+                "3500ms",
+            ),
+            (
+                "2.5T",
+                "5 hours",
+                "5 hours 8 minutes",
+                [
+                    "0 days 05:00:00",
+                    "0 days 05:02:30",
+                    "0 days 05:05:00",
+                    "0 days 05:07:30",
+                ],
+                "150s",
+            ),
+        ],
+    )
+    def test_timedelta_range_deprecated_freq(
+        self, freq_depr, start, end, expected_values, expected_freq
+    ):
+        # GH#52536
+        msg = (
+            f"'{freq_depr[-1]}' is deprecated and will be removed in a future version."
+        )
+
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = timedelta_range(start=start, end=end, freq=freq_depr)
+        expected = TimedeltaIndex(
+            expected_values, dtype="timedelta64[ns]", freq=expected_freq
+        )
+        tm.assert_index_equal(result, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..582102c7c49ce84a90d149aa0b0525293799c07a
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_crosstab.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_crosstab.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85413a69b62d6a5d79b2e9183047d6485479a123
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_crosstab.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_cut.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_cut.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1481f6b055da23995f7636b1e25b0b12eab4058c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_cut.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_from_dummies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_from_dummies.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf8accfdd4bca9bee56d43ecc07a79a1d4c5ebb9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_from_dummies.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_get_dummies.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_get_dummies.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a693a5d76c21c1e85d031e285f71fc711225b15
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_get_dummies.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_melt.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_melt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..def36070dbfe635af03c54d70590055e1083c3fa
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_melt.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..429f87eb653964ba4873292805f99b4eeb24ca10
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot_multilevel.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot_multilevel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5933950648a906bdbf6a8eb648b10317f045c65b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot_multilevel.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_qcut.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_qcut.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..673acb0d639746a4a85c92a817d30168690fdcd5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_qcut.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_union_categoricals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_union_categoricals.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1883e0e7b998d7a427f6f6510b0c370c284a03e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_union_categoricals.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31c44315ad6f2f1279bda08e549dbf84f097bf43
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_util.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b6724ed5d6db35cccdf69d52d17630e03fd2d9f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..961d6592acc47e1d99cf7460d707585ed29e686c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..619e15da4b9d534be6566523c15b4bf750408e16
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e0adde35c3bc69258b1c481c6543fd84e6952bb8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c597c28941f0b86e67ede487fc430d737deb347
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f42fe7f238857c88ed49bf68194a335a7aa58679
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc2ce5aeca0eef3b01206452009606a16135a8a7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e186463a5f39593e43c78a88c42174ba66a5c8fa
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bcfb7f3dbc886793b2b6046038aaa09ff454c9e2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11dfbf122cc6908c23ec43268f7ae2b40bab450b
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc7e71885b13b3f1ce388e50ec2ba243dea9c074
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4ba8f2a508247477f7ffd6f7bb096fab7d113a1f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..62b8c59ba8855b2ef4273a33b475b2e16b7b3d1b
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py
@@ -0,0 +1,7 @@
+import pytest
+
+
+@pytest.fixture(params=[True, False])
+def sort(request):
+    """Boolean sort keyword for concat and DataFrame.append."""
+    return request.param
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py
new file mode 100644
index 0000000000000000000000000000000000000000..81ca227fb7afb9768909f3cca7907fdaf74c430a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py
@@ -0,0 +1,389 @@
+import datetime as dt
+from itertools import combinations
+
+import dateutil
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+    Series,
+    Timestamp,
+    concat,
+    isna,
+)
+import pandas._testing as tm
+
+
+class TestAppend:
+    def test_append(self, sort, float_frame):
+        mixed_frame = float_frame.copy()
+        mixed_frame["foo"] = "bar"
+
+        begin_index = float_frame.index[:5]
+        end_index = float_frame.index[5:]
+
+        begin_frame = float_frame.reindex(begin_index)
+        end_frame = float_frame.reindex(end_index)
+
+        appended = begin_frame._append(end_frame)
+        tm.assert_almost_equal(appended["A"], float_frame["A"])
+
+        del end_frame["A"]
+        partial_appended = begin_frame._append(end_frame, sort=sort)
+        assert "A" in partial_appended
+
+        partial_appended = end_frame._append(begin_frame, sort=sort)
+        assert "A" in partial_appended
+
+        # mixed type handling
+        appended = mixed_frame[:5]._append(mixed_frame[5:])
+        tm.assert_frame_equal(appended, mixed_frame)
+
+        # what to test here
+        mixed_appended = mixed_frame[:5]._append(float_frame[5:], sort=sort)
+        mixed_appended2 = float_frame[:5]._append(mixed_frame[5:], sort=sort)
+
+        # all equal except 'foo' column
+        tm.assert_frame_equal(
+            mixed_appended.reindex(columns=["A", "B", "C", "D"]),
+            mixed_appended2.reindex(columns=["A", "B", "C", "D"]),
+        )
+
+    def test_append_empty(self, float_frame):
+        empty = DataFrame()
+
+        appended = float_frame._append(empty)
+        tm.assert_frame_equal(float_frame, appended)
+        assert appended is not float_frame
+
+        appended = empty._append(float_frame)
+        tm.assert_frame_equal(float_frame, appended)
+        assert appended is not float_frame
+
+    def test_append_overlap_raises(self, float_frame):
+        msg = "Indexes have overlapping values"
+        with pytest.raises(ValueError, match=msg):
+            float_frame._append(float_frame, verify_integrity=True)
+
+    def test_append_new_columns(self):
+        # see gh-6129: new columns
+        df = DataFrame({"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}})
+        row = Series([5, 6, 7], index=["a", "b", "c"], name="z")
+        expected = DataFrame(
+            {
+                "a": {"x": 1, "y": 2, "z": 5},
+                "b": {"x": 3, "y": 4, "z": 6},
+                "c": {"z": 7},
+            }
+        )
+        result = df._append(row)
+        tm.assert_frame_equal(result, expected)
+
+    def test_append_length0_frame(self, sort):
+        df = DataFrame(columns=["A", "B", "C"])
+        df3 = DataFrame(index=[0, 1], columns=["A", "B"])
+        df5 = df._append(df3, sort=sort)
+
+        expected = DataFrame(index=[0, 1], columns=["A", "B", "C"])
+        tm.assert_frame_equal(df5, expected)
+
+    def test_append_records(self):
+        arr1 = np.zeros((2,), dtype=("i4,f4,S10"))
+        arr1[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")]
+
+        arr2 = np.zeros((3,), dtype=("i4,f4,S10"))
+        arr2[:] = [(3, 4.0, "foo"), (5, 6.0, "bar"), (7.0, 8.0, "baz")]
+
+        df1 = DataFrame(arr1)
+        df2 = DataFrame(arr2)
+
+        result = df1._append(df2, ignore_index=True)
+        expected = DataFrame(np.concatenate((arr1, arr2)))
+        tm.assert_frame_equal(result, expected)
+
+    # rewrite sort fixture, since we also want to test default of None
+    def test_append_sorts(self, sort):
+        df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
+        df2 = DataFrame({"a": [1, 2], "c": [3, 4]}, index=[2, 3])
+
+        result = df1._append(df2, sort=sort)
+
+        # for None / True
+        expected = DataFrame(
+            {"b": [1, 2, None, None], "a": [1, 2, 1, 2], "c": [None, None, 3, 4]},
+            columns=["a", "b", "c"],
+        )
+        if sort is False:
+            expected = expected[["b", "a", "c"]]
+        tm.assert_frame_equal(result, expected)
+
+    def test_append_different_columns(self, sort):
+        df = DataFrame(
+            {
+                "bools": np.random.default_rng(2).standard_normal(10) > 0,
+                "ints": np.random.default_rng(2).integers(0, 10, 10),
+                "floats": np.random.default_rng(2).standard_normal(10),
+                "strings": ["foo", "bar"] * 5,
+            }
+        )
+
+        a = df[:5].loc[:, ["bools", "ints", "floats"]]
+        b = df[5:].loc[:, ["strings", "ints", "floats"]]
+
+        appended = a._append(b, sort=sort)
+        assert isna(appended["strings"][0:4]).all()
+        assert isna(appended["bools"][5:]).all()
+
+    def test_append_many(self, sort, float_frame):
+        chunks = [
+            float_frame[:5],
+            float_frame[5:10],
+            float_frame[10:15],
+            float_frame[15:],
+        ]
+
+        result = chunks[0]._append(chunks[1:])
+        tm.assert_frame_equal(result, float_frame)
+
+        chunks[-1] = chunks[-1].copy()
+        chunks[-1]["foo"] = "bar"
+        result = chunks[0]._append(chunks[1:], sort=sort)
+        tm.assert_frame_equal(result.loc[:, float_frame.columns], float_frame)
+        assert (result["foo"][15:] == "bar").all()
+        assert result["foo"][:15].isna().all()
+
+    def test_append_preserve_index_name(self):
+        # #980
+        df1 = DataFrame(columns=["A", "B", "C"])
+        df1 = df1.set_index(["A"])
+        df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]], columns=["A", "B", "C"])
+        df2 = df2.set_index(["A"])
+
+        msg = "The behavior of array concatenation with empty entries is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            result = df1._append(df2)
+        assert result.index.name == "A"
+
+    indexes_can_append = [
+        pd.RangeIndex(3),
+        Index([4, 5, 6]),
+        Index([4.5, 5.5, 6.5]),
+        Index(list("abc")),
+        pd.CategoricalIndex("A B C".split()),
+        pd.CategoricalIndex("D E F".split(), ordered=True),
+        pd.IntervalIndex.from_breaks([7, 8, 9, 10]),
+        pd.DatetimeIndex(
+            [
+                dt.datetime(2013, 1, 3, 0, 0),
+                dt.datetime(2013, 1, 3, 6, 10),
+                dt.datetime(2013, 1, 3, 7, 12),
+            ]
+        ),
+        pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()]),
+    ]
+
+    @pytest.mark.parametrize(
+        "index", indexes_can_append, ids=lambda x: type(x).__name__
+    )
+    def test_append_same_columns_type(self, index):
+        # GH18359
+
+        # df wider than ser
+        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index)
+        ser_index = index[:2]
+        ser = Series([7, 8], index=ser_index, name=2)
+        result = df._append(ser)
+        expected = DataFrame(
+            [[1, 2, 3.0], [4, 5, 6], [7, 8, np.nan]], index=[0, 1, 2], columns=index
+        )
+        # integer dtype is preserved for columns present in ser.index
+        assert expected.dtypes.iloc[0].kind == "i"
+        assert expected.dtypes.iloc[1].kind == "i"
+
+        tm.assert_frame_equal(result, expected)
+
+        # ser wider than df
+        ser_index = index
+        index = index[:2]
+        df = DataFrame([[1, 2], [4, 5]], columns=index)
+        ser = Series([7, 8, 9], index=ser_index, name=2)
+        result = df._append(ser)
+        expected = DataFrame(
+            [[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]],
+            index=[0, 1, 2],
+            columns=ser_index,
+        )
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "df_columns, series_index",
+        combinations(indexes_can_append, r=2),
+        ids=lambda x: type(x).__name__,
+    )
+    def test_append_different_columns_types(self, df_columns, series_index):
+        # GH18359
+        # See also test 'test_append_different_columns_types_raises' below
+        # for errors raised when appending
+
+        df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns)
+        ser = Series([7, 8, 9], index=series_index, name=2)
+
+        result = df._append(ser)
+        idx_diff = ser.index.difference(df_columns)
+        combined_columns = Index(df_columns.tolist()).append(idx_diff)
+        expected = DataFrame(
+            [
+                [1.0, 2.0, 3.0, np.nan, np.nan, np.nan],
+                [4, 5, 6, np.nan, np.nan, np.nan],
+                [np.nan, np.nan, np.nan, 7, 8, 9],
+            ],
+            index=[0, 1, 2],
+            columns=combined_columns,
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_append_dtype_coerce(self, sort):
+        # GH 4993
+        # appending with datetime will incorrectly convert datetime64
+
+        df1 = DataFrame(
+            index=[1, 2],
+            data=[dt.datetime(2013, 1, 1, 0, 0), dt.datetime(2013, 1, 2, 0, 0)],
+            columns=["start_time"],
+        )
+        df2 = DataFrame(
+            index=[4, 5],
+            data=[
+                [dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 3, 6, 10)],
+                [dt.datetime(2013, 1, 4, 0, 0), dt.datetime(2013, 1, 4, 7, 10)],
+            ],
+            columns=["start_time", "end_time"],
+        )
+
+        expected = concat(
+            [
+                Series(
+                    [
+                        pd.NaT,
+                        pd.NaT,
+                        dt.datetime(2013, 1, 3, 6, 10),
+                        dt.datetime(2013, 1, 4, 7, 10),
+                    ],
+                    name="end_time",
+                ),
+                Series(
+                    [
+                        dt.datetime(2013, 1, 1, 0, 0),
+                        dt.datetime(2013, 1, 2, 0, 0),
+                        dt.datetime(2013, 1, 3, 0, 0),
+                        dt.datetime(2013, 1, 4, 0, 0),
+                    ],
+                    name="start_time",
+                ),
+            ],
+            axis=1,
+            sort=sort,
+        )
+        result = df1._append(df2, ignore_index=True, sort=sort)
+        if sort:
+            expected = expected[["end_time", "start_time"]]
+        else:
+            expected = expected[["start_time", "end_time"]]
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_append_missing_column_proper_upcast(self, sort):
+        df1 = DataFrame({"A": np.array([1, 2, 3, 4], dtype="i8")})
+        df2 = DataFrame({"B": np.array([True, False, True, False], dtype=bool)})
+
+        appended = df1._append(df2, ignore_index=True, sort=sort)
+        assert appended["A"].dtype == "f8"
+        assert appended["B"].dtype == "O"
+
+    def test_append_empty_frame_to_series_with_dateutil_tz(self):
+        # GH 23682
+        date = Timestamp("2018-10-24 07:30:00", tz=dateutil.tz.tzutc())
+        ser = Series({"a": 1.0, "b": 2.0, "date": date})
+        df = DataFrame(columns=["c", "d"])
+        result_a = df._append(ser, ignore_index=True)
+        expected = DataFrame(
+            [[np.nan, np.nan, 1.0, 2.0, date]], columns=["c", "d", "a", "b", "date"]
+        )
+        # These columns get cast to object after append
+        expected["c"] = expected["c"].astype(object)
+        expected["d"] = expected["d"].astype(object)
+        tm.assert_frame_equal(result_a, expected)
+
+        expected = DataFrame(
+            [[np.nan, np.nan, 1.0, 2.0, date]] * 2, columns=["c", "d", "a", "b", "date"]
+        )
+        expected["c"] = expected["c"].astype(object)
+        expected["d"] = expected["d"].astype(object)
+        result_b = result_a._append(ser, ignore_index=True)
+        tm.assert_frame_equal(result_b, expected)
+
+        result = df._append([ser, ser], ignore_index=True)
+        tm.assert_frame_equal(result, expected)
+
+    def test_append_empty_tz_frame_with_datetime64ns(self, using_array_manager):
+        # https://github.com/pandas-dev/pandas/issues/35460
+        df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]")
+
+        # pd.NaT gets inferred as tz-naive, so append result is tz-naive
+        result = df._append({"a": pd.NaT}, ignore_index=True)
+        if using_array_manager:
+            expected = DataFrame({"a": [pd.NaT]}, dtype=object)
+        else:
+            expected = DataFrame({"a": [np.nan]}, dtype=object)
+        tm.assert_frame_equal(result, expected)
+
+        # also test with typed value to append
+        df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]")
+        other = Series({"a": pd.NaT}, dtype="datetime64[ns]")
+        result = df._append(other, ignore_index=True)
+        tm.assert_frame_equal(result, expected)
+
+        # mismatched tz
+        other = Series({"a": pd.NaT}, dtype="datetime64[ns, US/Pacific]")
+        result = df._append(other, ignore_index=True)
+        expected = DataFrame({"a": [pd.NaT]}).astype(object)
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"]
+    )
+    @pytest.mark.parametrize("val", [1, "NaT"])
+    def test_append_empty_frame_with_timedelta64ns_nat(
+        self, dtype_str, val, using_array_manager
+    ):
+        # https://github.com/pandas-dev/pandas/issues/35460
+        df = DataFrame(columns=["a"]).astype(dtype_str)
+
+        other = DataFrame({"a": [np.timedelta64(val, "ns")]})
+        result = df._append(other, ignore_index=True)
+
+        expected = other.astype(object)
+        if isinstance(val, str) and dtype_str != "int64" and not using_array_manager:
+            # TODO: expected used to be `other.astype(object)` which is a more
+            # reasonable result. This was changed when tightening
+            # assert_frame_equal's treatment of mismatched NAs to match the
+            # existing behavior.
+            expected = DataFrame({"a": [np.nan]}, dtype=object)
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"]
+    )
+    @pytest.mark.parametrize("val", [1, "NaT"])
+    def test_append_frame_with_timedelta64ns_nat(self, dtype_str, val):
+        # https://github.com/pandas-dev/pandas/issues/35460
+        df = DataFrame({"a": pd.array([1], dtype=dtype_str)})
+
+        other = DataFrame({"a": [np.timedelta64(val, "ns")]})
+        result = df._append(other, ignore_index=True)
+
+        expected = DataFrame({"a": [df.iloc[0, 0], other.iloc[0, 0]]}, dtype=object)
+        tm.assert_frame_equal(result, expected)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..31c3ef317622214efe302aae4b87a6bb6592a965
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py
@@ -0,0 +1,753 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    Categorical,
+    DataFrame,
+    Index,
+    Series,
+)
+import pandas._testing as tm
+
+
+@pytest.fixture(
+    params=list(
+        {
+            "bool": [True, False, True],
+            "int64": [1, 2, 3],
+            "float64": [1.1, np.nan, 3.3],
+            "category": Categorical(["X", "Y", "Z"]),
+            "object": ["a", "b", "c"],
+            "datetime64[ns]": [
+                pd.Timestamp("2011-01-01"),
+                pd.Timestamp("2011-01-02"),
+                pd.Timestamp("2011-01-03"),
+            ],
+            "datetime64[ns, US/Eastern]": [
+                pd.Timestamp("2011-01-01", tz="US/Eastern"),
+                pd.Timestamp("2011-01-02", tz="US/Eastern"),
+                pd.Timestamp("2011-01-03", tz="US/Eastern"),
+            ],
+            "timedelta64[ns]": [
+                pd.Timedelta("1 days"),
+                pd.Timedelta("2 days"),
+                pd.Timedelta("3 days"),
+            ],
+            "period[M]": [
+                pd.Period("2011-01", freq="M"),
+                pd.Period("2011-02", freq="M"),
+                pd.Period("2011-03", freq="M"),
+            ],
+        }.items()
+    )
+)
+def item(request):
+    key, data = request.param
+    return key, data
+
+
+@pytest.fixture
+def item2(item):
+    return item
+
+
+class TestConcatAppendCommon:
+    """
+    Test common dtype coercion rules between concat and append.
+    """
+
+    def test_dtypes(self, item, index_or_series, using_infer_string):
+        # to confirm test case covers intended dtypes
+        typ, vals = item
+        obj = index_or_series(vals)
+        if typ == "object" and using_infer_string:
+            typ = "string"
+        if isinstance(obj, Index):
+            assert obj.dtype == typ
+        elif isinstance(obj, Series):
+            if typ.startswith("period"):
+                assert obj.dtype == "Period[M]"
+            else:
+                assert obj.dtype == typ
+
+    def test_concatlike_same_dtypes(self, item):
+        # GH 13660
+        typ1, vals1 = item
+
+        vals2 = vals1
+        vals3 = vals1
+
+        if typ1 == "category":
+            exp_data = Categorical(list(vals1) + list(vals2))
+            exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
+        else:
+            exp_data = vals1 + vals2
+            exp_data3 = vals1 + vals2 + vals3
+
+        # ----- Index ----- #
+
+        # index.append
+        res = Index(vals1).append(Index(vals2))
+        exp = Index(exp_data)
+        tm.assert_index_equal(res, exp)
+
+        # 3 elements
+        res = Index(vals1).append([Index(vals2), Index(vals3)])
+        exp = Index(exp_data3)
+        tm.assert_index_equal(res, exp)
+
+        # index.append name mismatch
+        i1 = Index(vals1, name="x")
+        i2 = Index(vals2, name="y")
+        res = i1.append(i2)
+        exp = Index(exp_data)
+        tm.assert_index_equal(res, exp)
+
+        # index.append name match
+        i1 = Index(vals1, name="x")
+        i2 = Index(vals2, name="x")
+        res = i1.append(i2)
+        exp = Index(exp_data, name="x")
+        tm.assert_index_equal(res, exp)
+
+        # cannot append non-index
+        with pytest.raises(TypeError, match="all inputs must be Index"):
+            Index(vals1).append(vals2)
+
+        with pytest.raises(TypeError, match="all inputs must be Index"):
+            Index(vals1).append([Index(vals2), vals3])
+
+        # ----- Series ----- #
+
+        # series.append
+        res = Series(vals1)._append(Series(vals2), ignore_index=True)
+        exp = Series(exp_data)
+        tm.assert_series_equal(res, exp, check_index_type=True)
+
+        # concat
+        res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
+        tm.assert_series_equal(res, exp, check_index_type=True)
+
+        # 3 elements
+        res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
+        exp = Series(exp_data3)
+        tm.assert_series_equal(res, exp)
+
+        res = pd.concat(
+            [Series(vals1), Series(vals2), Series(vals3)],
+            ignore_index=True,
+        )
+        tm.assert_series_equal(res, exp)
+
+        # name mismatch
+        s1 = Series(vals1, name="x")
+        s2 = Series(vals2, name="y")
+        res = s1._append(s2, ignore_index=True)
+        exp = Series(exp_data)
+        tm.assert_series_equal(res, exp, check_index_type=True)
+
+        res = pd.concat([s1, s2], ignore_index=True)
+        tm.assert_series_equal(res, exp, check_index_type=True)
+
+        # name match
+        s1 = Series(vals1, name="x")
+        s2 = Series(vals2, name="x")
+        res = s1._append(s2, ignore_index=True)
+        exp = Series(exp_data, name="x")
+        tm.assert_series_equal(res, exp, check_index_type=True)
+
+        res = pd.concat([s1, s2], ignore_index=True)
+        tm.assert_series_equal(res, exp, check_index_type=True)
+
+        # cannot append non-index
+        msg = (
+            r"cannot concatenate object of type '.+'; "
+            "only Series and DataFrame objs are valid"
+        )
+        with pytest.raises(TypeError, match=msg):
+            Series(vals1)._append(vals2)
+
+        with pytest.raises(TypeError, match=msg):
+            Series(vals1)._append([Series(vals2), vals3])
+
+        with pytest.raises(TypeError, match=msg):
+            pd.concat([Series(vals1), vals2])
+
+        with pytest.raises(TypeError, match=msg):
+            pd.concat([Series(vals1), Series(vals2), vals3])
+
+    def test_concatlike_dtypes_coercion(self, item, item2, request):
+        # GH 13660
+        typ1, vals1 = item
+        typ2, vals2 = item2
+
+        vals3 = vals2
+
+        # basically infer
+        exp_index_dtype =
None + exp_series_dtype = None + + if typ1 == typ2: + pytest.skip("same dtype is tested in test_concatlike_same_dtypes") + elif typ1 == "category" or typ2 == "category": + pytest.skip("categorical type tested elsewhere") + + # specify expected dtype + if typ1 == "bool" and typ2 in ("int64", "float64"): + # series coerces to numeric based on numpy rule + # index doesn't because bool is object dtype + exp_series_dtype = typ2 + mark = pytest.mark.xfail(reason="GH#39187 casting to object") + request.applymarker(mark) + elif typ2 == "bool" and typ1 in ("int64", "float64"): + exp_series_dtype = typ1 + mark = pytest.mark.xfail(reason="GH#39187 casting to object") + request.applymarker(mark) + elif typ1 in {"datetime64[ns, US/Eastern]", "timedelta64[ns]"} or typ2 in { + "datetime64[ns, US/Eastern]", + "timedelta64[ns]", + }: + exp_index_dtype = object + exp_series_dtype = object + + exp_data = vals1 + vals2 + exp_data3 = vals1 + vals2 + vals3 + + # ----- Index ----- # + + # index.append + # GH#39817 + res = Index(vals1).append(Index(vals2)) + exp = Index(exp_data, dtype=exp_index_dtype) + tm.assert_index_equal(res, exp) + + # 3 elements + res = Index(vals1).append([Index(vals2), Index(vals3)]) + exp = Index(exp_data3, dtype=exp_index_dtype) + tm.assert_index_equal(res, exp) + + # ----- Series ----- # + + # series._append + # GH#39817 + res = Series(vals1)._append(Series(vals2), ignore_index=True) + exp = Series(exp_data, dtype=exp_series_dtype) + tm.assert_series_equal(res, exp, check_index_type=True) + + # concat + # GH#39817 + res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True) + tm.assert_series_equal(res, exp, check_index_type=True) + + # 3 elements + # GH#39817 + res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True) + exp = Series(exp_data3, dtype=exp_series_dtype) + tm.assert_series_equal(res, exp) + + # GH#39817 + res = pd.concat( + [Series(vals1), Series(vals2), Series(vals3)], + ignore_index=True, + ) + tm.assert_series_equal(res, exp) + + def test_concatlike_common_coerce_to_pandas_object(self): + # GH 13626 + # result must be Timestamp/Timedelta, not datetime.datetime/timedelta + dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"]) + tdi = pd.TimedeltaIndex(["1 days", "2 days"]) + + exp = Index( + [ + pd.Timestamp("2011-01-01"), + pd.Timestamp("2011-01-02"), + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + ] + ) + + res = dti.append(tdi) + tm.assert_index_equal(res, exp) + assert isinstance(res[0], pd.Timestamp) + assert isinstance(res[-1], pd.Timedelta) + + dts = Series(dti) + tds = Series(tdi) + res = dts._append(tds) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + assert isinstance(res.iloc[0], pd.Timestamp) + assert isinstance(res.iloc[-1], pd.Timedelta) + + res = pd.concat([dts, tds]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + assert isinstance(res.iloc[0], pd.Timestamp) + assert isinstance(res.iloc[-1], pd.Timedelta) + + def test_concatlike_datetimetz(self, tz_aware_fixture): + tz = tz_aware_fixture + # GH 7795 + dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz) + dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz) + + exp = pd.DatetimeIndex( + ["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz + ) + + res = dti1.append(dti2) + tm.assert_index_equal(res, exp) + + dts1 = Series(dti1) + dts2 = Series(dti2) + res = dts1._append(dts2) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([dts1, dts2]) + tm.assert_series_equal(res, 
Series(exp, index=[0, 1, 0, 1])) + + @pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"]) + def test_concatlike_datetimetz_short(self, tz): + # GH#7795 + ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz) + ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz) + df1 = DataFrame(0, index=ix1, columns=["A", "B"]) + df2 = DataFrame(0, index=ix2, columns=["A", "B"]) + + exp_idx = pd.DatetimeIndex( + ["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"], + tz=tz, + ).as_unit("ns") + exp = DataFrame(0, index=exp_idx, columns=["A", "B"]) + + tm.assert_frame_equal(df1._append(df2), exp) + tm.assert_frame_equal(pd.concat([df1, df2]), exp) + + def test_concatlike_datetimetz_to_object(self, tz_aware_fixture): + tz = tz_aware_fixture + # GH 13660 + + # different tz coerces to object + dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz) + dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"]) + + exp = Index( + [ + pd.Timestamp("2011-01-01", tz=tz), + pd.Timestamp("2011-01-02", tz=tz), + pd.Timestamp("2012-01-01"), + pd.Timestamp("2012-01-02"), + ], + dtype=object, + ) + + res = dti1.append(dti2) + tm.assert_index_equal(res, exp) + + dts1 = Series(dti1) + dts2 = Series(dti2) + res = dts1._append(dts2) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([dts1, dts2]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + # different tz + dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific") + + exp = Index( + [ + pd.Timestamp("2011-01-01", tz=tz), + pd.Timestamp("2011-01-02", tz=tz), + pd.Timestamp("2012-01-01", tz="US/Pacific"), + pd.Timestamp("2012-01-02", tz="US/Pacific"), + ], + dtype=object, + ) + + res = dti1.append(dti3) + tm.assert_index_equal(res, exp) + + dts1 = Series(dti1) + dts3 = Series(dti3) + res = dts1._append(dts3) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([dts1, dts3]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + def test_concatlike_common_period(self): + # GH 13660 + pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") + pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M") + + exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M") + + res = pi1.append(pi2) + tm.assert_index_equal(res, exp) + + ps1 = Series(pi1) + ps2 = Series(pi2) + res = ps1._append(ps2) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([ps1, ps2]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + def test_concatlike_common_period_diff_freq_to_object(self): + # GH 13221 + pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") + pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D") + + exp = Index( + [ + pd.Period("2011-01", freq="M"), + pd.Period("2011-02", freq="M"), + pd.Period("2012-01-01", freq="D"), + pd.Period("2012-02-01", freq="D"), + ], + dtype=object, + ) + + res = pi1.append(pi2) + tm.assert_index_equal(res, exp) + + ps1 = Series(pi1) + ps2 = Series(pi2) + res = ps1._append(ps2) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([ps1, ps2]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + def test_concatlike_common_period_mixed_dt_to_object(self): + # GH 13221 + # different datetimelike + pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") + tdi = pd.TimedeltaIndex(["1 days", "2 days"]) + exp = Index( + [ + pd.Period("2011-01", freq="M"), + pd.Period("2011-02", freq="M"), 
+ pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + ], + dtype=object, + ) + + res = pi1.append(tdi) + tm.assert_index_equal(res, exp) + + ps1 = Series(pi1) + tds = Series(tdi) + res = ps1._append(tds) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([ps1, tds]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + # inverse + exp = Index( + [ + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Period("2011-01", freq="M"), + pd.Period("2011-02", freq="M"), + ], + dtype=object, + ) + + res = tdi.append(pi1) + tm.assert_index_equal(res, exp) + + ps1 = Series(pi1) + tds = Series(tdi) + res = tds._append(ps1) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([tds, ps1]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + def test_concat_categorical(self): + # GH 13524 + + # same categories -> category + s1 = Series([1, 2, np.nan], dtype="category") + s2 = Series([2, 1, 2], dtype="category") + + exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category") + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + # partially different categories => not-category + s1 = Series([3, 2], dtype="category") + s2 = Series([2, 1], dtype="category") + + exp = Series([3, 2, 2, 1]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + # completely different categories (same dtype) => not-category + s1 = Series([10, 11, np.nan], dtype="category") + s2 = Series([np.nan, 1, 3, 2], dtype="category") + + exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + def test_union_categorical_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19096 + a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"])) + b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"])) + result = pd.concat([a, b], ignore_index=True) + expected = Series( + Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"]) + ) + tm.assert_series_equal(result, expected) + + def test_concat_categorical_coercion(self): + # GH 13524 + + # category + not-category => not-category + s1 = Series([1, 2, np.nan], dtype="category") + s2 = Series([2, 1, 2]) + + exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + # result shouldn't be affected by 1st elem dtype + exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # all values are not in category => not-category + s1 = Series([3, 2], dtype="category") + s2 = Series([2, 1]) + + exp = Series([3, 2, 2, 1]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series([2, 1, 3, 2]) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # completely different categories => not-category + s1 = Series([10, 11, np.nan], dtype="category") + s2 = Series([1, 3, 2]) + + exp = Series([10, 11, 
np.nan, 1, 3, 2], dtype=np.float64) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # different dtype => not-category + s1 = Series([10, 11, np.nan], dtype="category") + s2 = Series(["a", "b", "c"]) + + exp = Series([10, 11, np.nan, "a", "b", "c"]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series(["a", "b", "c", 10, 11, np.nan]) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # if normal series only contains NaN-likes => not-category + s1 = Series([10, 11], dtype="category") + s2 = Series([np.nan, np.nan, np.nan]) + + exp = Series([10, 11, np.nan, np.nan, np.nan]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series([np.nan, np.nan, np.nan, 10, 11]) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + def test_concat_categorical_3elem_coercion(self): + # GH 13524 + + # mixed dtypes => not-category + s1 = Series([1, 2, np.nan], dtype="category") + s2 = Series([2, 1, 2], dtype="category") + s3 = Series([1, 2, 1, 2, np.nan]) + + exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float") + tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp) + tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp) + + exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float") + tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp) + + # values are all in either category => not-category + s1 = Series([4, 5, 6], dtype="category") + s2 = Series([1, 2, 3], dtype="category") + s3 = Series([1, 3, 4]) + + exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4]) + tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp) + tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp) + + exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3]) + tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp) + + # values are all in either category => not-category + s1 = Series([4, 5, 6], dtype="category") + s2 = Series([1, 2, 3], dtype="category") + s3 = Series([10, 11, 12]) + + exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12]) + tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp) + tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp) + + exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3]) + tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp) + + def test_concat_categorical_multi_coercion(self): + # GH 13524 + + s1 = Series([1, 3], dtype="category") + s2 = Series([3, 4], dtype="category") + s3 = Series([2, 3]) + s4 = Series([2, 2], dtype="category") + s5 = Series([1, np.nan]) + s6 = Series([1, 3, 2], dtype="category") + + # mixed dtype, values are all in categories => not-category + exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 
3, 2]) + res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True) + tm.assert_series_equal(res, exp) + res = s1._append([s2, s3, s4, s5, s6], ignore_index=True) + tm.assert_series_equal(res, exp) + + exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3]) + res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True) + tm.assert_series_equal(res, exp) + res = s6._append([s5, s4, s3, s2, s1], ignore_index=True) + tm.assert_series_equal(res, exp) + + def test_concat_categorical_ordered(self): + # GH 13524 + + s1 = Series(Categorical([1, 2, np.nan], ordered=True)) + s2 = Series(Categorical([2, 1, 2], ordered=True)) + + exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True)) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True)) + tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s1._append([s2, s1], ignore_index=True), exp) + + def test_concat_categorical_coercion_nan(self): + # GH 13524 + + # some edge cases + # category + not-category => not category + s1 = Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category") + s2 = Series([np.nan, 1]) + + exp = Series([np.nan, np.nan, np.nan, 1]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + s1 = Series([1, np.nan], dtype="category") + s2 = Series([np.nan, np.nan]) + + exp = Series([1, np.nan, np.nan, np.nan], dtype="float") + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + # mixed dtype, all nan-likes => not-category + s1 = Series([np.nan, np.nan], dtype="category") + s2 = Series([np.nan, np.nan]) + + exp = Series([np.nan, np.nan, np.nan, np.nan]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # all category nan-likes => category + s1 = Series([np.nan, np.nan], dtype="category") + s2 = Series([np.nan, np.nan], dtype="category") + + exp = Series([np.nan, np.nan, np.nan, np.nan], dtype="category") + + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + def test_concat_categorical_empty(self): + # GH 13524 + + s1 = Series([], dtype="category") + s2 = Series([1, 2], dtype="category") + + msg = "The behavior of array concatenation with empty entries is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2) + tm.assert_series_equal(s1._append(s2, ignore_index=True), s2) + + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2) + tm.assert_series_equal(s2._append(s1, ignore_index=True), s2) + + s1 = Series([], dtype="category") + s2 = Series([], dtype="category") + + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2) + tm.assert_series_equal(s1._append(s2, ignore_index=True), s2) + + s1 = Series([], dtype="category") + s2 = Series([], dtype="object") + + # different dtype => not-category + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2) + 
tm.assert_series_equal(s1._append(s2, ignore_index=True), s2) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2) + tm.assert_series_equal(s2._append(s1, ignore_index=True), s2) + + s1 = Series([], dtype="category") + s2 = Series([np.nan, np.nan]) + + # empty Series is ignored + exp = Series([np.nan, np.nan]) + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + def test_categorical_concat_append(self): + cat = Categorical(["a", "b"], categories=["a", "b"]) + vals = [1, 2] + df = DataFrame({"cats": cat, "vals": vals}) + cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"]) + vals2 = [1, 2, 1, 2] + exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1])) + + tm.assert_frame_equal(pd.concat([df, df]), exp) + tm.assert_frame_equal(df._append(df), exp) + + # GH 13524 can concat different categories + cat3 = Categorical(["a", "b"], categories=["a", "b", "c"]) + vals3 = [1, 2] + df_different_categories = DataFrame({"cats": cat3, "vals": vals3}) + + res = pd.concat([df, df_different_categories], ignore_index=True) + exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]}) + tm.assert_frame_equal(res, exp) + + res = df._append(df_different_categories, ignore_index=True) + tm.assert_frame_equal(res, exp) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..bbaaf0abecfbdff32d5c7526e3c8c37c71e92cc9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py @@ -0,0 +1,273 @@ +from datetime import datetime + +import numpy as np + +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestCategoricalConcat: + def test_categorical_concat(self, sort): + # See GH 10177 + df1 = DataFrame( + np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"] + ) + + df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"]) + + cat_values = ["one", "one", "two", "one", "two", "two", "one"] + df2["h"] = Series(Categorical(cat_values)) + + res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort) + exp = DataFrame( + { + "a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12], + "b": [ + 1, + 4, + 7, + 10, + 13, + 16, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + ], + "c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13], + "h": [None] * 6 + cat_values, + } + ) + exp["h"] = exp["h"].astype(df2["h"].dtype) + tm.assert_frame_equal(res, exp) + + def test_categorical_concat_dtypes(self, using_infer_string): + # GH8143 + index = ["cat", "obj", "num"] + cat = Categorical(["a", "b", "c"]) + obj = Series(["a", "b", "c"]) + num = Series([1, 2, 3]) + df = pd.concat([Series(cat), obj, num], axis=1, keys=index) + + result = df.dtypes == ( + object if not using_infer_string else "string[pyarrow_numpy]" + ) + expected = Series([False, True, False], index=index) + tm.assert_series_equal(result, expected) + + result = 
df.dtypes == "int64" + expected = Series([False, False, True], index=index) + tm.assert_series_equal(result, expected) + + result = df.dtypes == "category" + expected = Series([True, False, False], index=index) + tm.assert_series_equal(result, expected) + + def test_concat_categoricalindex(self): + # GH 16111, categories that aren't lexsorted + categories = [9, 0, 1, 2, 3] + + a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories)) + b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories)) + c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories)) + + result = pd.concat([a, b, c], axis=1) + + exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories) + exp = DataFrame( + { + 0: [1, 1, np.nan, np.nan], + 1: [np.nan, 2, 2, np.nan], + 2: [np.nan, np.nan, 3, 3], + }, + columns=[0, 1, 2], + index=exp_idx, + ) + tm.assert_frame_equal(result, exp) + + def test_categorical_concat_preserve(self): + # GH 8641 series concat not preserving category dtype + # GH 13524 can concat different categories + s = Series(list("abc"), dtype="category") + s2 = Series(list("abd"), dtype="category") + + exp = Series(list("abcabd")) + res = pd.concat([s, s2], ignore_index=True) + tm.assert_series_equal(res, exp) + + exp = Series(list("abcabc"), dtype="category") + res = pd.concat([s, s], ignore_index=True) + tm.assert_series_equal(res, exp) + + exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category") + res = pd.concat([s, s]) + tm.assert_series_equal(res, exp) + + a = Series(np.arange(6, dtype="int64")) + b = Series(list("aabbca")) + + df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))}) + res = pd.concat([df2, df2]) + exp = DataFrame( + { + "A": pd.concat([a, a]), + "B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))), + } + ) + tm.assert_frame_equal(res, exp) + + def test_categorical_index_preserver(self): + a = Series(np.arange(6, dtype="int64")) + b = Series(list("aabbca")) + + df2 = DataFrame( + {"A": a, "B": b.astype(CategoricalDtype(list("cab")))} + ).set_index("B") + result = pd.concat([df2, df2]) + expected = DataFrame( + { + "A": pd.concat([a, a]), + "B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))), + } + ).set_index("B") + tm.assert_frame_equal(result, expected) + + # wrong categories -> uses concat_compat, which casts to object + df3 = DataFrame( + {"A": a, "B": Categorical(b, categories=list("abe"))} + ).set_index("B") + result = pd.concat([df2, df3]) + expected = pd.concat( + [ + df2.set_axis(df2.index.astype(object), axis=0), + df3.set_axis(df3.index.astype(object), axis=0), + ] + ) + tm.assert_frame_equal(result, expected) + + def test_concat_categorical_tz(self): + # GH-23816 + a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific")) + b = Series(["a", "b"], dtype="category") + result = pd.concat([a, b], ignore_index=True) + expected = Series( + [ + pd.Timestamp("2017-01-01", tz="US/Pacific"), + pd.Timestamp("2017-01-02", tz="US/Pacific"), + "a", + "b", + ] + ) + tm.assert_series_equal(result, expected) + + def test_concat_categorical_datetime(self): + # GH-39443 + df1 = DataFrame( + {"x": Series(datetime(2021, 1, 1), index=[0], dtype="category")} + ) + df2 = DataFrame( + {"x": Series(datetime(2021, 1, 2), index=[1], dtype="category")} + ) + + result = pd.concat([df1, df2]) + expected = DataFrame( + {"x": Series([datetime(2021, 1, 1), datetime(2021, 1, 2)])} + ) + + tm.assert_equal(result, expected) + + def test_concat_categorical_unchanged(self): + # GH-12007 + # test fix for when 
concat on categorical and float + # coerces dtype categorical -> float + df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A")) + ser = Series([0, 1, 2], index=[0, 1, 3], name="B") + result = pd.concat([df, ser], axis=1) + expected = DataFrame( + { + "A": Series(["a", "b", "c", np.nan], dtype="category"), + "B": Series([0, 1, np.nan, 2], dtype="float"), + } + ) + tm.assert_equal(result, expected) + + def test_categorical_concat_gh7864(self): + # GH 7864 + # make sure ordering is preserved + df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")}) + df["grade"] = Categorical(df["raw_grade"]) + df["grade"].cat.set_categories(["e", "a", "b"]) + + df1 = df[0:3] + df2 = df[3:] + + tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories) + tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories) + + dfx = pd.concat([df1, df2]) + tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories) + + dfa = df1._append(df2) + tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories) + + def test_categorical_index_upcast(self): + # GH 17629 + # test upcasting to object when concatenating on categorical indexes + # with non-identical categories + + a = DataFrame({"foo": [1, 2]}, index=Categorical(["foo", "bar"])) + b = DataFrame({"foo": [4, 3]}, index=Categorical(["baz", "bar"])) + + res = pd.concat([a, b]) + exp = DataFrame({"foo": [1, 2, 4, 3]}, index=["foo", "bar", "baz", "bar"]) + + tm.assert_equal(res, exp) + + a = Series([1, 2], index=Categorical(["foo", "bar"])) + b = Series([4, 3], index=Categorical(["baz", "bar"])) + + res = pd.concat([a, b]) + exp = Series([1, 2, 4, 3], index=["foo", "bar", "baz", "bar"]) + + tm.assert_equal(res, exp) + + def test_categorical_missing_from_one_frame(self): + # GH 25412 + df1 = DataFrame({"f1": [1, 2, 3]}) + df2 = DataFrame({"f1": [2, 3, 1], "f2": Series([4, 4, 4]).astype("category")}) + result = pd.concat([df1, df2], sort=True) + dtype = CategoricalDtype([4]) + expected = DataFrame( + { + "f1": [1, 2, 3, 2, 3, 1], + "f2": Categorical.from_codes([-1, -1, -1, 0, 0, 0], dtype=dtype), + }, + index=[0, 1, 2, 0, 1, 2], + ) + tm.assert_frame_equal(result, expected) + + def test_concat_categorical_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/24845 + + c1 = pd.CategoricalIndex(["a", "a"], categories=["a", "b"], ordered=False) + c2 = pd.CategoricalIndex(["b", "b"], categories=["b", "a"], ordered=False) + c3 = pd.CategoricalIndex( + ["a", "a", "b", "b"], categories=["a", "b"], ordered=False + ) + + df1 = DataFrame({"A": [1, 2]}, index=c1) + df2 = DataFrame({"A": [3, 4]}, index=c2) + + result = pd.concat((df1, df2)) + expected = DataFrame({"A": [1, 2, 3, 4]}, index=c3) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py new file mode 100644 index 0000000000000000000000000000000000000000..9e34d02091e698d2ace7acd74896d6e68c556252 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py @@ -0,0 +1,912 @@ +from collections import ( + abc, + deque, +) +from collections.abc import Iterator +from datetime import datetime +from decimal import Decimal + +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import 
( + DataFrame, + Index, + MultiIndex, + PeriodIndex, + Series, + concat, + date_range, +) +import pandas._testing as tm +from pandas.core.arrays import SparseArray +from pandas.tests.extension.decimal import to_decimal + + +class TestConcatenate: + def test_append_concat(self): + # GH#1815 + d1 = date_range("12/31/1990", "12/31/1999", freq="YE-DEC") + d2 = date_range("12/31/2000", "12/31/2009", freq="YE-DEC") + + s1 = Series(np.random.default_rng(2).standard_normal(10), d1) + s2 = Series(np.random.default_rng(2).standard_normal(10), d2) + + s1 = s1.to_period() + s2 = s2.to_period() + + # drops index + result = concat([s1, s2]) + assert isinstance(result.index, PeriodIndex) + assert result.index[0] == s1.index[0] + + def test_concat_copy(self, using_array_manager, using_copy_on_write): + df = DataFrame(np.random.default_rng(2).standard_normal((4, 3))) + df2 = DataFrame(np.random.default_rng(2).integers(0, 10, size=4).reshape(4, 1)) + df3 = DataFrame({5: "foo"}, index=range(4)) + + # These are actual copies. + result = concat([df, df2, df3], axis=1, copy=True) + + if not using_copy_on_write: + for arr in result._mgr.arrays: + assert not any( + np.shares_memory(arr, y) + for x in [df, df2, df3] + for y in x._mgr.arrays + ) + else: + for arr in result._mgr.arrays: + assert arr.base is not None + + # These are the same. + result = concat([df, df2, df3], axis=1, copy=False) + + for arr in result._mgr.arrays: + if arr.dtype.kind == "f": + assert arr.base is df._mgr.arrays[0].base + elif arr.dtype.kind in ["i", "u"]: + assert arr.base is df2._mgr.arrays[0].base + elif arr.dtype == object: + if using_array_manager: + # we get the same array object, which has no base + assert arr is df3._mgr.arrays[0] + else: + assert arr.base is not None + + # Float block was consolidated. 
+ df4 = DataFrame(np.random.default_rng(2).standard_normal((4, 1))) + result = concat([df, df2, df3, df4], axis=1, copy=False) + for arr in result._mgr.arrays: + if arr.dtype.kind == "f": + if using_array_manager or using_copy_on_write: + # this is a view on some array in either df or df4 + assert any( + np.shares_memory(arr, other) + for other in df._mgr.arrays + df4._mgr.arrays + ) + else: + # the block was consolidated, so we got a copy anyway + assert arr.base is None + elif arr.dtype.kind in ["i", "u"]: + assert arr.base is df2._mgr.arrays[0].base + elif arr.dtype == object: + # this is a view on df3 + assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays) + + def test_concat_with_group_keys(self): + # axis=0 + df = DataFrame(np.random.default_rng(2).standard_normal((3, 4))) + df2 = DataFrame(np.random.default_rng(2).standard_normal((4, 4))) + + result = concat([df, df2], keys=[0, 1]) + exp_index = MultiIndex.from_arrays( + [[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]] + ) + expected = DataFrame(np.r_[df.values, df2.values], index=exp_index) + tm.assert_frame_equal(result, expected) + + result = concat([df, df], keys=[0, 1]) + exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]) + expected = DataFrame(np.r_[df.values, df.values], index=exp_index2) + tm.assert_frame_equal(result, expected) + + # axis=1 + df = DataFrame(np.random.default_rng(2).standard_normal((4, 3))) + df2 = DataFrame(np.random.default_rng(2).standard_normal((4, 4))) + + result = concat([df, df2], keys=[0, 1], axis=1) + expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index) + tm.assert_frame_equal(result, expected) + + result = concat([df, df], keys=[0, 1], axis=1) + expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2) + tm.assert_frame_equal(result, expected) + + def test_concat_keys_specific_levels(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]] + level = ["three", "two", "one", "zero"] + result = concat( + pieces, + axis=1, + keys=["one", "two", "three"], + levels=[level], + names=["group_key"], + ) + + tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key")) + tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3])) + + assert result.columns.names == ["group_key", None] + + @pytest.mark.parametrize("mapping", ["mapping", "dict"]) + def test_concat_mapping(self, mapping, non_dict_mapping_subclass): + constructor = dict if mapping == "dict" else non_dict_mapping_subclass + frames = constructor( + { + "foo": DataFrame(np.random.default_rng(2).standard_normal((4, 3))), + "bar": DataFrame(np.random.default_rng(2).standard_normal((4, 3))), + "baz": DataFrame(np.random.default_rng(2).standard_normal((4, 3))), + "qux": DataFrame(np.random.default_rng(2).standard_normal((4, 3))), + } + ) + + sorted_keys = list(frames.keys()) + + result = concat(frames) + expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys) + tm.assert_frame_equal(result, expected) + + result = concat(frames, axis=1) + expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1) + tm.assert_frame_equal(result, expected) + + keys = ["baz", "foo", "bar"] + result = concat(frames, keys=keys) + expected = concat([frames[k] for k in keys], keys=keys) + tm.assert_frame_equal(result, expected) + + def test_concat_keys_and_levels(self): + df = DataFrame(np.random.default_rng(2).standard_normal((1, 3))) + df2 = 
DataFrame(np.random.default_rng(2).standard_normal((1, 4))) + + levels = [["foo", "baz"], ["one", "two"]] + names = ["first", "second"] + result = concat( + [df, df2, df, df2], + keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], + levels=levels, + names=names, + ) + expected = concat([df, df2, df, df2]) + exp_index = MultiIndex( + levels=levels + [[0]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]], + names=names + [None], + ) + expected.index = exp_index + + tm.assert_frame_equal(result, expected) + + # no names + result = concat( + [df, df2, df, df2], + keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], + levels=levels, + ) + assert result.index.names == (None,) * 3 + + # no levels + result = concat( + [df, df2, df, df2], + keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], + names=["first", "second"], + ) + assert result.index.names == ("first", "second", None) + tm.assert_index_equal( + result.index.levels[0], Index(["baz", "foo"], name="first") + ) + + def test_concat_keys_levels_no_overlap(self): + # GH #1406 + df = DataFrame(np.random.default_rng(2).standard_normal((1, 3)), index=["a"]) + df2 = DataFrame(np.random.default_rng(2).standard_normal((1, 4)), index=["b"]) + + msg = "Values not found in passed level" + with pytest.raises(ValueError, match=msg): + concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]]) + + msg = "Key one not in level" + with pytest.raises(ValueError, match=msg): + concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]]) + + def test_crossed_dtypes_weird_corner(self): + columns = ["A", "B", "C", "D"] + df1 = DataFrame( + { + "A": np.array([1, 2, 3, 4], dtype="f8"), + "B": np.array([1, 2, 3, 4], dtype="i8"), + "C": np.array([1, 2, 3, 4], dtype="f8"), + "D": np.array([1, 2, 3, 4], dtype="i8"), + }, + columns=columns, + ) + + df2 = DataFrame( + { + "A": np.array([1, 2, 3, 4], dtype="i8"), + "B": np.array([1, 2, 3, 4], dtype="f8"), + "C": np.array([1, 2, 3, 4], dtype="i8"), + "D": np.array([1, 2, 3, 4], dtype="f8"), + }, + columns=columns, + ) + + appended = concat([df1, df2], ignore_index=True) + expected = DataFrame( + np.concatenate([df1.values, df2.values], axis=0), columns=columns + ) + tm.assert_frame_equal(appended, expected) + + df = DataFrame(np.random.default_rng(2).standard_normal((1, 3)), index=["a"]) + df2 = DataFrame(np.random.default_rng(2).standard_normal((1, 4)), index=["b"]) + result = concat([df, df2], keys=["one", "two"], names=["first", "second"]) + assert result.index.names == ("first", "second") + + def test_with_mixed_tuples(self, sort): + # 10697 + # columns have mixed tuples, so handle properly + df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2)) + df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2)) + + # it works + concat([df1, df2], sort=sort) + + def test_concat_mixed_objs_columns(self): + # Test column-wise concat for mixed series/frames (axis=1) + # G2385 + + index = date_range("01-Jan-2013", periods=10, freq="h") + arr = np.arange(10, dtype="int64") + s1 = Series(arr, index=index) + s2 = Series(arr, index=index) + df = DataFrame(arr.reshape(-1, 1), index=index) + + expected = DataFrame( + np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0] + ) + result = concat([df, df], axis=1) + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1] + ) + result = concat([s1, s2], axis=1) + tm.assert_frame_equal(result, expected) + + expected = 
DataFrame( + np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2] + ) + result = concat([s1, s2, s1], axis=1) + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3] + ) + result = concat([s1, df, s2, s2, s1], axis=1) + tm.assert_frame_equal(result, expected) + + # with names + s1.name = "foo" + expected = DataFrame( + np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0] + ) + result = concat([s1, df, s2], axis=1) + tm.assert_frame_equal(result, expected) + + s2.name = "bar" + expected = DataFrame( + np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"] + ) + result = concat([s1, df, s2], axis=1) + tm.assert_frame_equal(result, expected) + + # ignore index + expected = DataFrame( + np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2] + ) + result = concat([s1, df, s2], axis=1, ignore_index=True) + tm.assert_frame_equal(result, expected) + + def test_concat_mixed_objs_index(self): + # Test row-wise concat for mixed series/frames with a common name + # GH2385, GH15047 + + index = date_range("01-Jan-2013", periods=10, freq="h") + arr = np.arange(10, dtype="int64") + s1 = Series(arr, index=index) + s2 = Series(arr, index=index) + df = DataFrame(arr.reshape(-1, 1), index=index) + + expected = DataFrame( + np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0] + ) + result = concat([s1, df, s2]) + tm.assert_frame_equal(result, expected) + + def test_concat_mixed_objs_index_names(self): + # Test row-wise concat for mixed series/frames with distinct names + # GH2385, GH15047 + + index = date_range("01-Jan-2013", periods=10, freq="h") + arr = np.arange(10, dtype="int64") + s1 = Series(arr, index=index, name="foo") + s2 = Series(arr, index=index, name="bar") + df = DataFrame(arr.reshape(-1, 1), index=index) + + expected = DataFrame( + np.kron(np.where(np.identity(3) == 1, 1, np.nan), arr).T, + index=index.tolist() * 3, + columns=["foo", 0, "bar"], + ) + result = concat([s1, df, s2]) + tm.assert_frame_equal(result, expected) + + # Rename all series to 0 when ignore_index=True + expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0]) + result = concat([s1, df, s2], ignore_index=True) + tm.assert_frame_equal(result, expected) + + def test_dtype_coercion(self): + # 12411 + df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]}) + + result = concat([df.iloc[[0]], df.iloc[[1]]]) + tm.assert_series_equal(result.dtypes, df.dtypes) + + # 12045 + df = DataFrame({"date": [datetime(2012, 1, 1), datetime(1012, 1, 2)]}) + result = concat([df.iloc[[0]], df.iloc[[1]]]) + tm.assert_series_equal(result.dtypes, df.dtypes) + + # 11594 + df = DataFrame({"text": ["some words"] + [None] * 9}) + result = concat([df.iloc[[0]], df.iloc[[1]]]) + tm.assert_series_equal(result.dtypes, df.dtypes) + + def test_concat_single_with_key(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + + result = concat([df], keys=["foo"]) + expected = concat([df, df], keys=["foo", "bar"]) + tm.assert_frame_equal(result, expected[:10]) + + def test_concat_no_items_raises(self): + with pytest.raises(ValueError, match="No objects to concatenate"): + concat([]) + + def test_concat_exclude_none(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + + pieces = [df[:5], None, None, df[5:]] + result = concat(pieces) + tm.assert_frame_equal(result, df) + with pytest.raises(ValueError, match="All objects passed were 
None"): + concat([None, None]) + + def test_concat_keys_with_none(self): + # #1649 + df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]]) + + result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0}) + expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0}) + tm.assert_frame_equal(result, expected) + + result = concat( + [None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"] + ) + expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"]) + tm.assert_frame_equal(result, expected) + + def test_concat_bug_1719(self): + ts1 = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + ts2 = ts1.copy()[::2] + + # to join with union + # these two are of different length! + left = concat([ts1, ts2], join="outer", axis=1) + right = concat([ts2, ts1], join="outer", axis=1) + + assert len(left) == len(right) + + def test_concat_bug_2972(self): + ts0 = Series(np.zeros(5)) + ts1 = Series(np.ones(5)) + ts0.name = ts1.name = "same name" + result = concat([ts0, ts1], axis=1) + + expected = DataFrame({0: ts0, 1: ts1}) + expected.columns = ["same name", "same name"] + tm.assert_frame_equal(result, expected) + + def test_concat_bug_3602(self): + # GH 3602, duplicate columns + df1 = DataFrame( + { + "firmNo": [0, 0, 0, 0], + "prc": [6, 6, 6, 6], + "stringvar": ["rrr", "rrr", "rrr", "rrr"], + } + ) + df2 = DataFrame( + {"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]} + ) + expected = DataFrame( + [ + [0, 6, "rrr", 9, 1, 6], + [0, 6, "rrr", 10, 2, 6], + [0, 6, "rrr", 11, 3, 6], + [0, 6, "rrr", 12, 4, 6], + ] + ) + expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"] + + result = concat([df1, df2], axis=1) + tm.assert_frame_equal(result, expected) + + def test_concat_iterables(self): + # GH8645 check concat works with tuples, list, generators, and weird + # stuff like deque and custom iterables + df1 = DataFrame([1, 2, 3]) + df2 = DataFrame([4, 5, 6]) + expected = DataFrame([1, 2, 3, 4, 5, 6]) + tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected) + tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected) + tm.assert_frame_equal( + concat((df for df in (df1, df2)), ignore_index=True), expected + ) + tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected) + + class CustomIterator1: + def __len__(self) -> int: + return 2 + + def __getitem__(self, index): + try: + return {0: df1, 1: df2}[index] + except KeyError as err: + raise IndexError from err + + tm.assert_frame_equal(concat(CustomIterator1(), ignore_index=True), expected) + + class CustomIterator2(abc.Iterable): + def __iter__(self) -> Iterator: + yield df1 + yield df2 + + tm.assert_frame_equal(concat(CustomIterator2(), ignore_index=True), expected) + + def test_concat_order(self): + # GH 17344, GH#47331 + dfs = [DataFrame(index=range(3), columns=["a", 1, None])] + dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for _ in range(100)] + + result = concat(dfs, sort=True).columns + expected = Index([1, "a", None]) + tm.assert_index_equal(result, expected) + + def test_concat_different_extension_dtypes_upcasts(self): + a = Series(pd.array([1, 2], dtype="Int64")) + b = Series(to_decimal([1, 2])) + + result = concat([a, b], ignore_index=True) + expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object) + tm.assert_series_equal(result, expected) + + def test_concat_ordered_dict(self): + # GH 21510 + expected = concat( + [Series(range(3)), Series(range(4))], 
keys=["First", "Another"] + ) + result = concat({"First": Series(range(3)), "Another": Series(range(4))}) + tm.assert_series_equal(result, expected) + + def test_concat_duplicate_indices_raise(self): + # GH 45888: test raise for concat DataFrames with duplicate indices + # https://github.com/pandas-dev/pandas/issues/36263 + df1 = DataFrame( + np.random.default_rng(2).standard_normal(5), + index=[0, 1, 2, 3, 3], + columns=["a"], + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal(5), + index=[0, 1, 2, 2, 4], + columns=["b"], + ) + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + concat([df1, df2], axis=1) + + +def test_concat_no_unnecessary_upcast(float_numpy_dtype, frame_or_series): + # GH 13247 + dims = frame_or_series(dtype=object).ndim + dt = float_numpy_dtype + + dfs = [ + frame_or_series(np.array([1], dtype=dt, ndmin=dims)), + frame_or_series(np.array([np.nan], dtype=dt, ndmin=dims)), + frame_or_series(np.array([5], dtype=dt, ndmin=dims)), + ] + x = concat(dfs) + assert x.values.dtype == dt + + +@pytest.mark.parametrize("pdt", [Series, DataFrame]) +def test_concat_will_upcast(pdt, any_signed_int_numpy_dtype): + dt = any_signed_int_numpy_dtype + dims = pdt().ndim + dfs = [ + pdt(np.array([1], dtype=dt, ndmin=dims)), + pdt(np.array([np.nan], ndmin=dims)), + pdt(np.array([5], dtype=dt, ndmin=dims)), + ] + x = concat(dfs) + assert x.values.dtype == "float64" + + +def test_concat_empty_and_non_empty_frame_regression(): + # GH 18178 regression test + df1 = DataFrame({"foo": [1]}) + df2 = DataFrame({"foo": []}) + expected = DataFrame({"foo": [1.0]}) + result = concat([df1, df2]) + tm.assert_frame_equal(result, expected) + + +def test_concat_sparse(): + # GH 23557 + a = Series(SparseArray([0, 1, 2])) + expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype( + pd.SparseDtype(np.int64, 0) + ) + result = concat([a, a], axis=1) + tm.assert_frame_equal(result, expected) + + +def test_concat_dense_sparse(): + # GH 30668 + dtype = pd.SparseDtype(np.float64, None) + a = Series(pd.arrays.SparseArray([1, None]), dtype=dtype) + b = Series([1], dtype=float) + expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(dtype) + result = concat([a, b], axis=0) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("keys", [["e", "f", "f"], ["f", "e", "f"]]) +def test_duplicate_keys(keys): + # GH 33654 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + s1 = Series([7, 8, 9], name="c") + s2 = Series([10, 11, 12], name="d") + result = concat([df, s1, s2], axis=1, keys=keys) + expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] + expected_columns = MultiIndex.from_tuples( + [(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")] + ) + expected = DataFrame(expected_values, columns=expected_columns) + tm.assert_frame_equal(result, expected) + + +def test_duplicate_keys_same_frame(): + # GH 43595 + keys = ["e", "e"] + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + result = concat([df, df], axis=1, keys=keys) + expected_values = [[1, 4, 1, 4], [2, 5, 2, 5], [3, 6, 3, 6]] + expected_columns = MultiIndex.from_tuples( + [(keys[0], "a"), (keys[0], "b"), (keys[1], "a"), (keys[1], "b")] + ) + expected = DataFrame(expected_values, columns=expected_columns) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" +) +@pytest.mark.parametrize( + "obj", + [ + tm.SubclassedDataFrame({"A": 
np.arange(0, 10)}), + tm.SubclassedSeries(np.arange(0, 10), name="A"), + ], +) +def test_concat_preserves_subclass(obj): + # GH28330 -- preserve subclass + + result = concat([obj, obj]) + assert isinstance(result, type(obj)) + + +def test_concat_frame_axis0_extension_dtypes(): + # preserve extension dtype (through common_dtype mechanism) + df1 = DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")}) + df2 = DataFrame({"a": np.array([4, 5, 6])}) + + result = concat([df1, df2], ignore_index=True) + expected = DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64") + tm.assert_frame_equal(result, expected) + + result = concat([df2, df1], ignore_index=True) + expected = DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64") + tm.assert_frame_equal(result, expected) + + +def test_concat_preserves_extension_int64_dtype(): + # GH 24768 + df_a = DataFrame({"a": [-1]}, dtype="Int64") + df_b = DataFrame({"b": [1]}, dtype="Int64") + result = concat([df_a, df_b], ignore_index=True) + expected = DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype1,dtype2,expected_dtype", + [ + ("bool", "bool", "bool"), + ("boolean", "bool", "boolean"), + ("bool", "boolean", "boolean"), + ("boolean", "boolean", "boolean"), + ], +) +def test_concat_bool_types(dtype1, dtype2, expected_dtype): + # GH 42800 + ser1 = Series([True, False], dtype=dtype1) + ser2 = Series([False, True], dtype=dtype2) + result = concat([ser1, ser2], ignore_index=True) + expected = Series([True, False, False, True], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + ("keys", "integrity"), + [ + (["red"] * 3, True), + (["red"] * 3, False), + (["red", "blue", "red"], False), + (["red", "blue", "red"], True), + ], +) +def test_concat_repeated_keys(keys, integrity): + # GH: 20816 + series_list = [Series({"a": 1}), Series({"b": 2}), Series({"c": 3})] + result = concat(series_list, keys=keys, verify_integrity=integrity) + tuples = list(zip(keys, ["a", "b", "c"])) + expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples)) + tm.assert_series_equal(result, expected) + + +def test_concat_null_object_with_dti(): + # GH#40841 + dti = pd.DatetimeIndex( + ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)" + ) + right = DataFrame(data={"C": [0.5274]}, index=dti) + + idx = Index([None], dtype="object", name="Maybe Time (UTC)") + left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx) + + result = concat([left, right], axis="columns") + + exp_index = Index([None, dti[0]], dtype=object) + expected = DataFrame( + { + "A": np.array([None, np.nan], dtype=object), + "B": [np.nan, np.nan], + "C": [np.nan, 0.5274], + }, + index=exp_index, + ) + tm.assert_frame_equal(result, expected) + + +def test_concat_multiindex_with_empty_rangeindex(): + # GH#41234 + mi = MultiIndex.from_tuples([("B", 1), ("C", 1)]) + df1 = DataFrame([[1, 2]], columns=mi) + df2 = DataFrame(index=[1], columns=pd.RangeIndex(0)) + + result = concat([df1, df2]) + expected = DataFrame([[1, 2], [np.nan, np.nan]], columns=mi) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data", + [ + Series(data=[1, 2]), + DataFrame( + data={ + "col1": [1, 2], + } + ), + DataFrame(dtype=float), + Series(dtype=float), + ], +) +def test_concat_drop_attrs(data): + # GH#41828 + df1 = data.copy() + df1.attrs = {1: 1} + df2 = data.copy() + df2.attrs = {1: 2} + df = concat([df1, df2]) + assert len(df.attrs) == 0 + + 
+@pytest.mark.parametrize( + "data", + [ + Series(data=[1, 2]), + DataFrame( + data={ + "col1": [1, 2], + } + ), + DataFrame(dtype=float), + Series(dtype=float), + ], +) +def test_concat_retain_attrs(data): + # GH#41828 + df1 = data.copy() + df1.attrs = {1: 1} + df2 = data.copy() + df2.attrs = {1: 1} + df = concat([df1, df2]) + assert df.attrs[1] == 1 + + +@td.skip_array_manager_invalid_test +@pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"]) +@pytest.mark.parametrize("empty_dtype", [None, "float64", "object"]) +def test_concat_ignore_empty_object_float(empty_dtype, df_dtype): + # https://github.com/pandas-dev/pandas/issues/45637 + df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype) + empty = DataFrame(columns=["foo", "bar"], dtype=empty_dtype) + + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + warn = None + if df_dtype == "datetime64[ns]" or ( + df_dtype == "float64" and empty_dtype != "float64" + ): + warn = FutureWarning + with tm.assert_produces_warning(warn, match=msg): + result = concat([empty, df]) + expected = df + if df_dtype == "int64": + # TODO what exact behaviour do we want for integer eventually? + if empty_dtype == "float64": + expected = df.astype("float64") + else: + expected = df.astype("object") + tm.assert_frame_equal(result, expected) + + +@td.skip_array_manager_invalid_test +@pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"]) +@pytest.mark.parametrize("empty_dtype", [None, "float64", "object"]) +def test_concat_ignore_all_na_object_float(empty_dtype, df_dtype): + df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype) + empty = DataFrame({"foo": [np.nan], "bar": [np.nan]}, dtype=empty_dtype) + + if df_dtype == "int64": + # TODO what exact behaviour do we want for integer eventually? 
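+ # an all-NA object column is expected to pull the whole result to object; + # otherwise the integer values are upcast to float64 so the NaN row can + # be represented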
+ if empty_dtype == "object": + df_dtype = "object" + else: + df_dtype = "float64" + + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + warn = None + if empty_dtype != df_dtype and empty_dtype is not None: + warn = FutureWarning + elif df_dtype == "datetime64[ns]": + warn = FutureWarning + + with tm.assert_produces_warning(warn, match=msg): + result = concat([empty, df], ignore_index=True) + + expected = DataFrame({"foo": [np.nan, 1, 2], "bar": [np.nan, 1, 2]}, dtype=df_dtype) + tm.assert_frame_equal(result, expected) + + +@td.skip_array_manager_invalid_test +def test_concat_ignore_empty_from_reindex(): + # https://github.com/pandas-dev/pandas/pull/43507#issuecomment-920375856 + df1 = DataFrame({"a": [1], "b": [pd.Timestamp("2012-01-01")]}) + df2 = DataFrame({"a": [2]}) + + aligned = df2.reindex(columns=df1.columns) + + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([df1, aligned], ignore_index=True) + expected = DataFrame({"a": [1, 2], "b": [pd.Timestamp("2012-01-01"), pd.NaT]}) + tm.assert_frame_equal(result, expected) + + +def test_concat_mismatched_keys_length(): + # GH#43485 + ser = Series(range(5)) + sers = [ser + n for n in range(4)] + keys = ["A", "B", "C"] + + msg = r"The behavior of pd.concat with len\(keys\) != len\(objs\) is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + concat(sers, keys=keys, axis=1) + with tm.assert_produces_warning(FutureWarning, match=msg): + concat(sers, keys=keys, axis=0) + with tm.assert_produces_warning(FutureWarning, match=msg): + concat((x for x in sers), keys=(y for y in keys), axis=1) + with tm.assert_produces_warning(FutureWarning, match=msg): + concat((x for x in sers), keys=(y for y in keys), axis=0) + + +def test_concat_multiindex_with_category(): + df1 = DataFrame( + { + "c1": Series(list("abc"), dtype="category"), + "c2": Series(list("eee"), dtype="category"), + "i2": Series([1, 2, 3]), + } + ) + df1 = df1.set_index(["c1", "c2"]) + df2 = DataFrame( + { + "c1": Series(list("abc"), dtype="category"), + "c2": Series(list("eee"), dtype="category"), + "i2": Series([4, 5, 6]), + } + ) + df2 = df2.set_index(["c1", "c2"]) + result = concat([df1, df2]) + expected = DataFrame( + { + "c1": Series(list("abcabc"), dtype="category"), + "c2": Series(list("eeeeee"), dtype="category"), + "i2": Series([1, 2, 3, 4, 5, 6]), + } + ) + expected = expected.set_index(["c1", "c2"]) + tm.assert_frame_equal(result, expected) + + +def test_concat_ea_upcast(): + # GH#54848 + df1 = DataFrame(["a"], dtype="string") + df2 = DataFrame([1], dtype="Int64") + result = concat([df1, df2]) + expected = DataFrame(["a", 1], index=[0, 0]) + tm.assert_frame_equal(result, expected) + + +def test_concat_none_with_timezone_timestamp(): + # GH#52093 + df1 = DataFrame([{"A": None}]) + df2 = DataFrame([{"A": pd.Timestamp("1990-12-20 00:00:00+00:00")}]) + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([df1, df2], ignore_index=True) + expected = DataFrame({"A": [None, pd.Timestamp("1990-12-20 00:00:00+00:00")]}) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py new file mode 100644 index 
0000000000000000000000000000000000000000..f288921c25753463030cfe4f765b2709b676c858 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py @@ -0,0 +1,230 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + concat, +) +import pandas._testing as tm + + +class TestDataFrameConcat: + def test_concat_multiple_frames_dtypes(self): + # GH#2759 + df1 = DataFrame(data=np.ones((10, 2)), columns=["foo", "bar"], dtype=np.float64) + df2 = DataFrame(data=np.ones((10, 2)), dtype=np.float32) + results = concat((df1, df2), axis=1).dtypes + expected = Series( + [np.dtype("float64")] * 2 + [np.dtype("float32")] * 2, + index=["foo", "bar", 0, 1], + ) + tm.assert_series_equal(results, expected) + + def test_concat_tuple_keys(self): + # GH#14438 + df1 = DataFrame(np.ones((2, 2)), columns=list("AB")) + df2 = DataFrame(np.ones((3, 2)) * 2, columns=list("AB")) + results = concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")]) + expected = DataFrame( + { + "A": { + ("bee", "bah", 0): 1.0, + ("bee", "bah", 1): 1.0, + ("bee", "boo", 0): 2.0, + ("bee", "boo", 1): 2.0, + ("bee", "boo", 2): 2.0, + }, + "B": { + ("bee", "bah", 0): 1.0, + ("bee", "bah", 1): 1.0, + ("bee", "boo", 0): 2.0, + ("bee", "boo", 1): 2.0, + ("bee", "boo", 2): 2.0, + }, + } + ) + tm.assert_frame_equal(results, expected) + + def test_concat_named_keys(self): + # GH#14252 + df = DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]}) + index = Index(["a", "b"], name="baz") + concatted_named_from_keys = concat([df, df], keys=index) + expected_named = DataFrame( + {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]}, + index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]), + ) + tm.assert_frame_equal(concatted_named_from_keys, expected_named) + + index_no_name = Index(["a", "b"], name=None) + concatted_named_from_names = concat([df, df], keys=index_no_name, names=["baz"]) + tm.assert_frame_equal(concatted_named_from_names, expected_named) + + concatted_unnamed = concat([df, df], keys=index_no_name) + expected_unnamed = DataFrame( + {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]}, + index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]), + ) + tm.assert_frame_equal(concatted_unnamed, expected_unnamed) + + def test_concat_axis_parameter(self): + # GH#14369 + df1 = DataFrame({"A": [0.1, 0.2]}, index=range(2)) + df2 = DataFrame({"A": [0.3, 0.4]}, index=range(2)) + + # Index/row/0 DataFrame + expected_index = DataFrame({"A": [0.1, 0.2, 0.3, 0.4]}, index=[0, 1, 0, 1]) + + concatted_index = concat([df1, df2], axis="index") + tm.assert_frame_equal(concatted_index, expected_index) + + concatted_row = concat([df1, df2], axis="rows") + tm.assert_frame_equal(concatted_row, expected_index) + + concatted_0 = concat([df1, df2], axis=0) + tm.assert_frame_equal(concatted_0, expected_index) + + # Columns/1 DataFrame + expected_columns = DataFrame( + [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"] + ) + + concatted_columns = concat([df1, df2], axis="columns") + tm.assert_frame_equal(concatted_columns, expected_columns) + + concatted_1 = concat([df1, df2], axis=1) + tm.assert_frame_equal(concatted_1, expected_columns) + + series1 = Series([0.1, 0.2]) + series2 = Series([0.3, 0.4]) + + # Index/row/0 Series + expected_index_series = Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1]) + + concatted_index_series = concat([series1, series2], axis="index") + tm.assert_series_equal(concatted_index_series, 
expected_index_series) + + concatted_row_series = concat([series1, series2], axis="rows") + tm.assert_series_equal(concatted_row_series, expected_index_series) + + concatted_0_series = concat([series1, series2], axis=0) + tm.assert_series_equal(concatted_0_series, expected_index_series) + + # Columns/1 Series + expected_columns_series = DataFrame( + [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1] + ) + + concatted_columns_series = concat([series1, series2], axis="columns") + tm.assert_frame_equal(concatted_columns_series, expected_columns_series) + + concatted_1_series = concat([series1, series2], axis=1) + tm.assert_frame_equal(concatted_1_series, expected_columns_series) + + # Testing ValueError + with pytest.raises(ValueError, match="No axis named"): + concat([series1, series2], axis="something") + + def test_concat_numerical_names(self): + # GH#15262, GH#12223 + df = DataFrame( + {"col": range(9)}, + dtype="int32", + index=( + pd.MultiIndex.from_product( + [["A0", "A1", "A2"], ["B0", "B1", "B2"]], names=[1, 2] + ) + ), + ) + result = concat((df.iloc[:2, :], df.iloc[-2:, :])) + expected = DataFrame( + {"col": [0, 1, 7, 8]}, + dtype="int32", + index=pd.MultiIndex.from_tuples( + [("A0", "B0"), ("A0", "B1"), ("A2", "B1"), ("A2", "B2")], names=[1, 2] + ), + ) + tm.assert_frame_equal(result, expected) + + def test_concat_astype_dup_col(self): + # GH#23049 + df = DataFrame([{"a": "b"}]) + df = concat([df, df], axis=1) + + result = df.astype("category") + expected = DataFrame( + np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"] + ).astype("category") + tm.assert_frame_equal(result, expected) + + def test_concat_dataframe_keys_bug(self, sort): + t1 = DataFrame( + {"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))} + ) + t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))}) + + # it works + result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort) + assert list(result.columns) == [("t1", "value"), ("t2", "value")] + + def test_concat_bool_with_int(self): + # GH#42092 we may want to change this to return object, but that + # would need a deprecation + df1 = DataFrame(Series([True, False, True, True], dtype="bool")) + df2 = DataFrame(Series([1, 0, 1], dtype="int64")) + + result = concat([df1, df2]) + expected = concat([df1.astype("int64"), df2]) + tm.assert_frame_equal(result, expected) + + def test_concat_duplicates_in_index_with_keys(self): + # GH#42651 + index = [1, 1, 3] + data = [1, 2, 3] + + df = DataFrame(data=data, index=index) + result = concat([df], keys=["A"], names=["ID", "date"]) + mi = pd.MultiIndex.from_product([["A"], index], names=["ID", "date"]) + expected = DataFrame(data=data, index=mi) + tm.assert_frame_equal(result, expected) + tm.assert_index_equal(result.index.levels[1], Index([1, 3], name="date")) + + @pytest.mark.parametrize("ignore_index", [True, False]) + @pytest.mark.parametrize("order", ["C", "F"]) + @pytest.mark.parametrize("axis", [0, 1]) + def test_concat_copies(self, axis, order, ignore_index, using_copy_on_write): + # based on asv ConcatDataFrames + df = DataFrame(np.zeros((10, 5), dtype=np.float32, order=order)) + + res = concat([df] * 5, axis=axis, ignore_index=ignore_index, copy=True) + + if not using_copy_on_write: + for arr in res._iter_column_arrays(): + for arr2 in df._iter_column_arrays(): + assert not np.shares_memory(arr, arr2) + + def test_outer_sort_columns(self): + # GH#47127 + df1 = DataFrame({"A": [0], "B": [1], 0: 1}) + df2 = DataFrame({"A": [100]}) + result = concat([df1, df2], 
ignore_index=True, join="outer", sort=True) + expected = DataFrame({0: [1.0, np.nan], "A": [0, 100], "B": [1.0, np.nan]}) + tm.assert_frame_equal(result, expected) + + def test_inner_sort_columns(self): + # GH#47127 + df1 = DataFrame({"A": [0], "B": [1], 0: 1}) + df2 = DataFrame({"A": [100], 0: 2}) + result = concat([df1, df2], ignore_index=True, join="inner", sort=True) + expected = DataFrame({0: [1, 2], "A": [0, 100]}) + tm.assert_frame_equal(result, expected) + + def test_sort_columns_one_df(self): + # GH#47127 + df1 = DataFrame({"A": [100], 0: 2}) + result = concat([df1], ignore_index=True, join="inner", sort=True) + expected = DataFrame({0: [2], "A": [100]}) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py new file mode 100644 index 0000000000000000000000000000000000000000..4c94dc0d51f7e7de9fbd43150b1331282fbabb90 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py @@ -0,0 +1,606 @@ +import datetime as dt +from datetime import datetime + +import dateutil +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, + concat, + date_range, + to_timedelta, +) +import pandas._testing as tm + + +class TestDatetimeConcat: + def test_concat_datetime64_block(self): + rng = date_range("1/1/2000", periods=10) + + df = DataFrame({"time": rng}) + + result = concat([df, df]) + assert (result.iloc[:10]["time"] == rng).all() + assert (result.iloc[10:]["time"] == rng).all() + + def test_concat_datetime_datetime64_frame(self): + # GH#2624 + rows = [] + rows.append([datetime(2010, 1, 1), 1]) + rows.append([datetime(2010, 1, 2), "hi"]) + + df2_obj = DataFrame.from_records(rows, columns=["date", "test"]) + + ind = date_range(start="2000/1/1", freq="D", periods=10) + df1 = DataFrame({"date": ind, "test": range(10)}) + + # it works! 
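+ # (the object-dtype frame holding a str is expected to concatenate with + # the datetime64 frame without raising)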
+ concat([df1, df2_obj]) + + def test_concat_datetime_timezone(self): + # GH 18523 + idx1 = date_range("2011-01-01", periods=3, freq="h", tz="Europe/Paris") + idx2 = date_range(start=idx1[0], end=idx1[-1], freq="h") + df1 = DataFrame({"a": [1, 2, 3]}, index=idx1) + df2 = DataFrame({"b": [1, 2, 3]}, index=idx2) + result = concat([df1, df2], axis=1) + + exp_idx = DatetimeIndex( + [ + "2011-01-01 00:00:00+01:00", + "2011-01-01 01:00:00+01:00", + "2011-01-01 02:00:00+01:00", + ], + dtype="M8[ns, Europe/Paris]", + freq="h", + ) + expected = DataFrame( + [[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"] + ) + + tm.assert_frame_equal(result, expected) + + idx3 = date_range("2011-01-01", periods=3, freq="h", tz="Asia/Tokyo") + df3 = DataFrame({"b": [1, 2, 3]}, index=idx3) + result = concat([df1, df3], axis=1) + + exp_idx = DatetimeIndex( + [ + "2010-12-31 15:00:00+00:00", + "2010-12-31 16:00:00+00:00", + "2010-12-31 17:00:00+00:00", + "2010-12-31 23:00:00+00:00", + "2011-01-01 00:00:00+00:00", + "2011-01-01 01:00:00+00:00", + ] + ).as_unit("ns") + + expected = DataFrame( + [ + [np.nan, 1], + [np.nan, 2], + [np.nan, 3], + [1, np.nan], + [2, np.nan], + [3, np.nan], + ], + index=exp_idx, + columns=["a", "b"], + ) + + tm.assert_frame_equal(result, expected) + + # GH 13783: Concat after resample + result = concat([df1.resample("h").mean(), df2.resample("h").mean()], sort=True) + expected = DataFrame( + {"a": [1, 2, 3] + [np.nan] * 3, "b": [np.nan] * 3 + [1, 2, 3]}, + index=idx1.append(idx1), + ) + tm.assert_frame_equal(result, expected) + + def test_concat_datetimeindex_freq(self): + # GH 3232 + # Monotonic index result + dr = date_range("01-Jan-2013", periods=100, freq="50ms", tz="UTC") + data = list(range(100)) + expected = DataFrame(data, index=dr) + result = concat([expected[:50], expected[50:]]) + tm.assert_frame_equal(result, expected) + + # Non-monotonic index result + result = concat([expected[50:], expected[:50]]) + expected = DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50])) + expected.index._data.freq = None + tm.assert_frame_equal(result, expected) + + def test_concat_multiindex_datetime_object_index(self): + # https://github.com/pandas-dev/pandas/issues/11058 + idx = Index( + [dt.date(2013, 1, 1), dt.date(2014, 1, 1), dt.date(2015, 1, 1)], + dtype="object", + ) + + s = Series( + ["a", "b"], + index=MultiIndex.from_arrays( + [ + [1, 2], + idx[:-1], + ], + names=["first", "second"], + ), + ) + s2 = Series( + ["a", "b"], + index=MultiIndex.from_arrays( + [[1, 2], idx[::2]], + names=["first", "second"], + ), + ) + mi = MultiIndex.from_arrays( + [[1, 2, 2], idx], + names=["first", "second"], + ) + assert mi.levels[1].dtype == object + + expected = DataFrame( + [["a", "a"], ["b", np.nan], [np.nan, "b"]], + index=mi, + ) + result = concat([s, s2], axis=1) + tm.assert_frame_equal(result, expected) + + def test_concat_NaT_series(self): + # GH 11693 + # test for merging NaT series with datetime series. 
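+ # all-NaT tz-aware values combined with real timestamps of the same tz + # should keep the tz-aware dtype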
+ x = Series( + date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="US/Eastern") + ) + y = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]") + expected = Series([x[0], x[1], pd.NaT, pd.NaT]) + + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + # all NaT with tz + expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns, US/Eastern]") + result = concat([y, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + def test_concat_NaT_series2(self): + # without tz + x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h")) + y = Series(date_range("20151124 10:00", "20151124 11:00", freq="1h")) + y[:] = pd.NaT + expected = Series([x[0], x[1], pd.NaT, pd.NaT]) + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + # all NaT without tz + x[:] = pd.NaT + expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns]") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_concat_NaT_dataframes(self, tz): + # GH 12396 + + dti = DatetimeIndex([pd.NaT, pd.NaT], tz=tz) + first = DataFrame({0: dti}) + second = DataFrame( + [[Timestamp("2015/01/01", tz=tz)], [Timestamp("2016/01/01", tz=tz)]], + index=[2, 3], + ) + expected = DataFrame( + [ + pd.NaT, + pd.NaT, + Timestamp("2015/01/01", tz=tz), + Timestamp("2016/01/01", tz=tz), + ] + ) + + result = concat([first, second], axis=0) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz1", [None, "UTC"]) + @pytest.mark.parametrize("tz2", [None, "UTC"]) + @pytest.mark.parametrize("item", [pd.NaT, Timestamp("20150101")]) + def test_concat_NaT_dataframes_all_NaT_axis_0( + self, tz1, tz2, item, using_array_manager + ): + # GH 12396 + + # tz-naive + first = DataFrame([[pd.NaT], [pd.NaT]]).apply(lambda x: x.dt.tz_localize(tz1)) + second = DataFrame([item]).apply(lambda x: x.dt.tz_localize(tz2)) + + result = concat([first, second], axis=0) + expected = DataFrame(Series([pd.NaT, pd.NaT, item], index=[0, 1, 0])) + expected = expected.apply(lambda x: x.dt.tz_localize(tz2)) + if tz1 != tz2: + expected = expected.astype(object) + if item is pd.NaT and not using_array_manager: + # GH#18463 + # TODO: setting nan here is to keep the test passing as we + # make assert_frame_equal stricter, but is nan really the + # ideal behavior here? 
+ if tz1 is not None: + expected.iloc[-1, 0] = np.nan + else: + expected.iloc[:-1, 0] = np.nan + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz1", [None, "UTC"]) + @pytest.mark.parametrize("tz2", [None, "UTC"]) + def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2): + # GH 12396 + + first = DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)) + second = DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1]) + expected = DataFrame( + { + 0: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1), + 1: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2), + } + ) + result = concat([first, second], axis=1) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz1", [None, "UTC"]) + @pytest.mark.parametrize("tz2", [None, "UTC"]) + def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2): + # GH 12396 + + # tz-naive + first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1) + second = DataFrame( + [ + [Timestamp("2015/01/01", tz=tz2)], + [Timestamp("2016/01/01", tz=tz2)], + ], + index=[2, 3], + ) + + expected = DataFrame( + [ + pd.NaT, + pd.NaT, + Timestamp("2015/01/01", tz=tz2), + Timestamp("2016/01/01", tz=tz2), + ] + ) + if tz1 != tz2: + expected = expected.astype(object) + + result = concat([first, second]) + tm.assert_frame_equal(result, expected) + + +class TestTimezoneConcat: + def test_concat_tz_series(self): + # gh-11755: tz and no tz + x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC")) + y = Series(date_range("2012-01-01", "2012-01-02")) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + def test_concat_tz_series2(self): + # gh-11887: concat tz and object + x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC")) + y = Series(["a", "b"]) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + def test_concat_tz_series3(self, unit, unit2): + # see gh-12217 and gh-12306 + # Concatenating two UTC times + first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]") + first[0] = first[0].dt.tz_localize("UTC") + + second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]") + second[0] = second[0].dt.tz_localize("UTC") + + result = concat([first, second]) + exp_unit = tm.get_finest_unit(unit, unit2) + assert result[0].dtype == f"datetime64[{exp_unit}, UTC]" + + def test_concat_tz_series4(self, unit, unit2): + # Concatenating two London times + first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]") + first[0] = first[0].dt.tz_localize("Europe/London") + + second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]") + second[0] = second[0].dt.tz_localize("Europe/London") + + result = concat([first, second]) + exp_unit = tm.get_finest_unit(unit, unit2) + assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]" + + def test_concat_tz_series5(self, unit, unit2): + # Concatenating 2+1 London times + first = DataFrame( + [[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]], dtype=f"M8[{unit}]" + ) + first[0] = first[0].dt.tz_localize("Europe/London") + + second = DataFrame([[datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]") + second[0] = second[0].dt.tz_localize("Europe/London") + + result = concat([first, second]) + exp_unit = tm.get_finest_unit(unit, unit2) + assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]" + + def test_concat_tz_series6(self, unit, unit2): 
+ # Concatenating 1+2 London times + first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]") + first[0] = first[0].dt.tz_localize("Europe/London") + + second = DataFrame( + [[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]" + ) + second[0] = second[0].dt.tz_localize("Europe/London") + + result = concat([first, second]) + exp_unit = tm.get_finest_unit(unit, unit2) + assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]" + + def test_concat_tz_series_tzlocal(self): + # see gh-13583 + x = [ + Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()), + Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()), + ] + y = [ + Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()), + Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()), + ] + + result = concat([Series(x), Series(y)], ignore_index=True) + tm.assert_series_equal(result, Series(x + y)) + assert result.dtype == "datetime64[ns, tzlocal()]" + + def test_concat_tz_series_with_datetimelike(self): + # see gh-12620: tz and timedelta + x = [ + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-02-01", tz="US/Eastern"), + ] + y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")] + result = concat([Series(x), Series(y)], ignore_index=True) + tm.assert_series_equal(result, Series(x + y, dtype="object")) + + # tz and period + y = [pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M")] + result = concat([Series(x), Series(y)], ignore_index=True) + tm.assert_series_equal(result, Series(x + y, dtype="object")) + + def test_concat_tz_frame(self): + df2 = DataFrame( + { + "A": Timestamp("20130102", tz="US/Eastern"), + "B": Timestamp("20130603", tz="CET"), + }, + index=range(5), + ) + + # concat + df3 = concat([df2.A.to_frame(), df2.B.to_frame()], axis=1) + tm.assert_frame_equal(df2, df3) + + def test_concat_multiple_tzs(self): + # GH#12467 + # combining datetime tz-aware and naive DataFrames + ts1 = Timestamp("2015-01-01", tz=None) + ts2 = Timestamp("2015-01-01", tz="UTC") + ts3 = Timestamp("2015-01-01", tz="EST") + + df1 = DataFrame({"time": [ts1]}) + df2 = DataFrame({"time": [ts2]}) + df3 = DataFrame({"time": [ts3]}) + + results = concat([df1, df2]).reset_index(drop=True) + expected = DataFrame({"time": [ts1, ts2]}, dtype=object) + tm.assert_frame_equal(results, expected) + + results = concat([df1, df3]).reset_index(drop=True) + expected = DataFrame({"time": [ts1, ts3]}, dtype=object) + tm.assert_frame_equal(results, expected) + + results = concat([df2, df3]).reset_index(drop=True) + expected = DataFrame({"time": [ts2, ts3]}) + tm.assert_frame_equal(results, expected) + + def test_concat_multiindex_with_tz(self): + # GH 6606 + df = DataFrame( + { + "dt": DatetimeIndex( + [ + datetime(2014, 1, 1), + datetime(2014, 1, 2), + datetime(2014, 1, 3), + ], + dtype="M8[ns, US/Pacific]", + ), + "b": ["A", "B", "C"], + "c": [1, 2, 3], + "d": [4, 5, 6], + } + ) + df = df.set_index(["dt", "b"]) + + exp_idx1 = DatetimeIndex( + ["2014-01-01", "2014-01-02", "2014-01-03"] * 2, + dtype="M8[ns, US/Pacific]", + name="dt", + ) + exp_idx2 = Index(["A", "B", "C"] * 2, name="b") + exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2]) + expected = DataFrame( + {"c": [1, 2, 3] * 2, "d": [4, 5, 6] * 2}, index=exp_idx, columns=["c", "d"] + ) + + result = concat([df, df]) + tm.assert_frame_equal(result, expected) + + def test_concat_tz_not_aligned(self): + # GH#22796 + ts = pd.to_datetime([1, 2]).tz_localize("UTC") + a = DataFrame({"A": ts}) + b = DataFrame({"A": ts, "B": ts}) + result = concat([a, b], sort=True, ignore_index=True) + expected 
= DataFrame( + {"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)} + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "t1", + [ + "2015-01-01", + pytest.param( + pd.NaT, + marks=pytest.mark.xfail( + reason="GH23037 incorrect dtype when concatenating" + ), + ), + ], + ) + def test_concat_tz_NaT(self, t1): + # GH#22796 + # Concatenating tz-aware multicolumn DataFrames + ts1 = Timestamp(t1, tz="UTC") + ts2 = Timestamp("2015-01-01", tz="UTC") + ts3 = Timestamp("2015-01-01", tz="UTC") + + df1 = DataFrame([[ts1, ts2]]) + df2 = DataFrame([[ts3]]) + + result = concat([df1, df2]) + expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0]) + + tm.assert_frame_equal(result, expected) + + def test_concat_tz_with_empty(self): + # GH 9188 + result = concat( + [DataFrame(date_range("2000", periods=1, tz="UTC")), DataFrame()] + ) + expected = DataFrame(date_range("2000", periods=1, tz="UTC")) + tm.assert_frame_equal(result, expected) + + +class TestPeriodConcat: + def test_concat_period_series(self): + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="D")) + expected = Series([x[0], x[1], y[0], y[1]], dtype="Period[D]") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + def test_concat_period_multiple_freq_series(self): + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="M")) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + assert result.dtype == "object" + + def test_concat_period_other_series(self): + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="M")) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + assert result.dtype == "object" + + def test_concat_period_other_series2(self): + # non-period + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(DatetimeIndex(["2015-11-01", "2015-12-01"])) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + assert result.dtype == "object" + + def test_concat_period_other_series3(self): + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(["A", "B"]) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + assert result.dtype == "object" + + +def test_concat_timedelta64_block(): + rng = to_timedelta(np.arange(10), unit="s") + + df = DataFrame({"time": rng}) + + result = concat([df, df]) + tm.assert_frame_equal(result.iloc[:10], df) + tm.assert_frame_equal(result.iloc[10:], df) + + +def test_concat_multiindex_datetime_nat(): + # GH#44900 + left = DataFrame({"a": 1}, index=MultiIndex.from_tuples([(1, pd.NaT)])) + right = DataFrame( + {"b": 2}, index=MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)]) + ) + result = concat([left, right], axis="columns") + expected = DataFrame( + {"a": [1.0, np.nan], "b": 2}, MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)]) + ) + tm.assert_frame_equal(result, expected) + + +def test_concat_float_datetime64(using_array_manager): + # GH#32934 + df_time = DataFrame({"A": 
pd.array(["2000"], dtype="datetime64[ns]")}) + df_float = DataFrame({"A": pd.array([1.0], dtype="float64")}) + + expected = DataFrame( + { + "A": [ + pd.array(["2000"], dtype="datetime64[ns]")[0], + pd.array([1.0], dtype="float64")[0], + ] + }, + index=[0, 0], + ) + result = concat([df_time, df_float]) + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"A": pd.array([], dtype="object")}) + result = concat([df_time.iloc[:0], df_float.iloc[:0]]) + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"A": pd.array([1.0], dtype="object")}) + result = concat([df_time.iloc[:0], df_float]) + tm.assert_frame_equal(result, expected) + + if not using_array_manager: + expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")}) + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([df_time, df_float.iloc[:0]]) + tm.assert_frame_equal(result, expected) + else: + expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")}).astype( + {"A": "object"} + ) + result = concat([df_time, df_float.iloc[:0]]) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py new file mode 100644 index 0000000000000000000000000000000000000000..30ef0a934157badba23ce2eb07de691498035254 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py @@ -0,0 +1,295 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + RangeIndex, + Series, + concat, + date_range, +) +import pandas._testing as tm + + +class TestEmptyConcat: + def test_handle_empty_objects(self, sort, using_infer_string): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), columns=list("abcd") + ) + + dfcopy = df[:5].copy() + dfcopy["foo"] = "bar" + empty = df[5:5] + + frames = [dfcopy, empty, empty, df[5:]] + concatted = concat(frames, axis=0, sort=sort) + + expected = df.reindex(columns=["a", "b", "c", "d", "foo"]) + expected["foo"] = expected["foo"].astype( + object if not using_infer_string else "string[pyarrow_numpy]" + ) + expected.loc[0:4, "foo"] = "bar" + + tm.assert_frame_equal(concatted, expected) + + # empty as first element with time series + # GH3259 + df = DataFrame( + {"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s") + ) + empty = DataFrame() + result = concat([df, empty], axis=1) + tm.assert_frame_equal(result, df) + result = concat([empty, df], axis=1) + tm.assert_frame_equal(result, df) + + result = concat([df, empty]) + tm.assert_frame_equal(result, df) + result = concat([empty, df]) + tm.assert_frame_equal(result, df) + + def test_concat_empty_series(self): + # GH 11082 + s1 = Series([1, 2, 3], name="x") + s2 = Series(name="y", dtype="float64") + res = concat([s1, s2], axis=1) + exp = DataFrame( + {"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]}, + index=RangeIndex(3), + ) + tm.assert_frame_equal(res, exp) + + s1 = Series([1, 2, 3], name="x") + s2 = Series(name="y", dtype="float64") + msg = "The behavior of array concatenation with empty entries is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = concat([s1, s2], axis=0) + # name will be reset + exp = Series([1, 2, 3]) + tm.assert_series_equal(res, exp) + + # empty Series with no name + s1 = Series([1, 2, 3], name="x") + 
s2 = Series(name=None, dtype="float64") + res = concat([s1, s2], axis=1) + exp = DataFrame( + {"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]}, + columns=["x", 0], + index=RangeIndex(3), + ) + tm.assert_frame_equal(res, exp) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + @pytest.mark.parametrize("values", [[], [1, 2, 3]]) + def test_concat_empty_series_timelike(self, tz, values): + # GH 18447 + + first = Series([], dtype="M8[ns]").dt.tz_localize(tz) + dtype = None if values else np.float64 + second = Series(values, dtype=dtype) + + expected = DataFrame( + { + 0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz), + 1: values, + } + ) + result = concat([first, second], axis=1) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "left,right,expected", + [ + # booleans + (np.bool_, np.int32, np.object_), # changed from int32 in 2.0 GH#39817 + (np.bool_, np.float32, np.object_), + # datetime-like + ("m8[ns]", np.bool_, np.object_), + ("m8[ns]", np.int64, np.object_), + ("M8[ns]", np.bool_, np.object_), + ("M8[ns]", np.int64, np.object_), + # categorical + ("category", "category", "category"), + ("category", "object", "object"), + ], + ) + def test_concat_empty_series_dtypes(self, left, right, expected): + # GH#39817, GH#45101 + result = concat([Series(dtype=left), Series(dtype=right)]) + assert result.dtype == expected + + @pytest.mark.parametrize( + "dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"] + ) + def test_concat_empty_series_dtypes_match_roundtrips(self, dtype): + dtype = np.dtype(dtype) + + result = concat([Series(dtype=dtype)]) + assert result.dtype == dtype + + result = concat([Series(dtype=dtype), Series(dtype=dtype)]) + assert result.dtype == dtype + + @pytest.mark.parametrize("dtype", ["float64", "int8", "uint8", "m8[ns]", "M8[ns]"]) + @pytest.mark.parametrize( + "dtype2", + ["float64", "int8", "uint8", "m8[ns]", "M8[ns]"], + ) + def test_concat_empty_series_dtypes_roundtrips(self, dtype, dtype2): + # round-tripping with self & like self + if dtype == dtype2: + pytest.skip("same dtype is not applicable for test") + + def int_result_type(dtype, dtype2): + typs = {dtype.kind, dtype2.kind} + if not len(typs - {"i", "u", "b"}) and ( + dtype.kind == "i" or dtype2.kind == "i" + ): + return "i" + elif not len(typs - {"u", "b"}) and ( + dtype.kind == "u" or dtype2.kind == "u" + ): + return "u" + return None + + def float_result_type(dtype, dtype2): + typs = {dtype.kind, dtype2.kind} + if not len(typs - {"f", "i", "u"}) and ( + dtype.kind == "f" or dtype2.kind == "f" + ): + return "f" + return None + + def get_result_type(dtype, dtype2): + result = float_result_type(dtype, dtype2) + if result is not None: + return result + result = int_result_type(dtype, dtype2) + if result is not None: + return result + return "O" + + dtype = np.dtype(dtype) + dtype2 = np.dtype(dtype2) + expected = get_result_type(dtype, dtype2) + result = concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype + assert result.kind == expected + + def test_concat_empty_series_dtypes_triple(self): + assert ( + concat( + [Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)] + ).dtype + == np.object_ + ) + + def test_concat_empty_series_dtype_category_with_array(self): + # GH#18515 + assert ( + concat( + [Series(np.array([]), dtype="category"), Series(dtype="float64")] + ).dtype + == "float64" + ) + + def test_concat_empty_series_dtypes_sparse(self): + result = concat( + [ + Series(dtype="float64").astype("Sparse"), + 
Series(dtype="float64").astype("Sparse"), + ] + ) + assert result.dtype == "Sparse[float64]" + + result = concat( + [Series(dtype="float64").astype("Sparse"), Series(dtype="float64")] + ) + expected = pd.SparseDtype(np.float64) + assert result.dtype == expected + + result = concat( + [Series(dtype="float64").astype("Sparse"), Series(dtype="object")] + ) + expected = pd.SparseDtype("object") + assert result.dtype == expected + + def test_concat_empty_df_object_dtype(self): + # GH 9149 + df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]}) + df_2 = DataFrame(columns=df_1.columns) + result = concat([df_1, df_2], axis=0) + expected = df_1.astype(object) + tm.assert_frame_equal(result, expected) + + def test_concat_empty_dataframe_dtypes(self): + df = DataFrame(columns=list("abc")) + df["a"] = df["a"].astype(np.bool_) + df["b"] = df["b"].astype(np.int32) + df["c"] = df["c"].astype(np.float64) + + result = concat([df, df]) + assert result["a"].dtype == np.bool_ + assert result["b"].dtype == np.int32 + assert result["c"].dtype == np.float64 + + result = concat([df, df.astype(np.float64)]) + assert result["a"].dtype == np.object_ + assert result["b"].dtype == np.float64 + assert result["c"].dtype == np.float64 + + def test_concat_inner_join_empty(self): + # GH 15328 + df_empty = DataFrame() + df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64") + df_expected = DataFrame({"a": []}, index=RangeIndex(0), dtype="int64") + + result = concat([df_a, df_empty], axis=1, join="inner") + tm.assert_frame_equal(result, df_expected) + + result = concat([df_a, df_empty], axis=1, join="outer") + tm.assert_frame_equal(result, df_a) + + def test_empty_dtype_coerce(self): + # xref to #12411 + # xref to #12045 + # xref to #11594 + # see below + + # 10571 + df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"]) + df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"]) + result = concat([df1, df2]) + expected = df1.dtypes + tm.assert_series_equal(result.dtypes, expected) + + def test_concat_empty_dataframe(self): + # 39037 + df1 = DataFrame(columns=["a", "b"]) + df2 = DataFrame(columns=["b", "c"]) + result = concat([df1, df2, df1]) + expected = DataFrame(columns=["a", "b", "c"]) + tm.assert_frame_equal(result, expected) + + df3 = DataFrame(columns=["a", "b"]) + df4 = DataFrame(columns=["b"]) + result = concat([df3, df4]) + expected = DataFrame(columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_concat_empty_dataframe_different_dtypes(self, using_infer_string): + # 39037 + df1 = DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + df2 = DataFrame({"a": [1, 2, 3]}) + + result = concat([df1[:0], df2[:0]]) + assert result["a"].dtype == np.int64 + assert result["b"].dtype == np.object_ if not using_infer_string else "string" + + def test_concat_to_empty_ea(self): + """48510 `concat` to an empty EA should maintain type EA dtype.""" + df_empty = DataFrame({"a": pd.array([], dtype=pd.Int64Dtype())}) + df_new = DataFrame({"a": pd.array([1, 2, 3], dtype=pd.Int64Dtype())}) + expected = df_new.copy() + result = concat([df_empty, df_new]) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py new file mode 100644 index 0000000000000000000000000000000000000000..52bb9fa0f151bb802d0cb440982230bd99c65b76 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py @@ -0,0 +1,472 @@ +from copy import deepcopy + +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + concat, +) +import pandas._testing as tm + + +class TestIndexConcat: + def test_concat_ignore_index(self, sort): + frame1 = DataFrame( + {"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]} + ) + frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]}) + frame1.index = Index(["x", "y", "z"]) + frame2.index = Index(["x", "y", "q"]) + + v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort) + + nan = np.nan + expected = DataFrame( + [ + [nan, nan, nan, 4.3], + ["a", 1, 4.5, 5.2], + ["b", 2, 3.2, 2.2], + ["c", 3, 1.2, nan], + ], + index=Index(["q", "x", "y", "z"]), + ) + if not sort: + expected = expected.loc[["x", "y", "z", "q"]] + + tm.assert_frame_equal(v1, expected) + + @pytest.mark.parametrize( + "name_in1,name_in2,name_in3,name_out", + [ + ("idx", "idx", "idx", "idx"), + ("idx", "idx", None, None), + ("idx", None, None, None), + ("idx1", "idx2", None, None), + ("idx1", "idx1", "idx2", None), + ("idx1", "idx2", "idx3", None), + (None, None, None, None), + ], + ) + def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out): + # GH13475 + indices = [ + Index(["a", "b", "c"], name=name_in1), + Index(["b", "c", "d"], name=name_in2), + Index(["c", "d", "e"], name=name_in3), + ] + frames = [ + DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"]) + ] + result = concat(frames, axis=1) + + exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out) + expected = DataFrame( + { + "x": [0, 1, 2, np.nan, np.nan], + "y": [np.nan, 0, 1, 2, np.nan], + "z": [np.nan, np.nan, 0, 1, 2], + }, + index=exp_ind, + ) + + tm.assert_frame_equal(result, expected) + + def test_concat_rename_index(self): + a = DataFrame( + np.random.default_rng(2).random((3, 3)), + columns=list("ABC"), + index=Index(list("abc"), name="index_a"), + ) + b = DataFrame( + np.random.default_rng(2).random((3, 3)), + columns=list("ABC"), + index=Index(list("abc"), name="index_b"), + ) + + result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"]) + + exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"]) + names = list(exp.index.names) + names[1] = "lvl1" + exp.index.set_names(names, inplace=True) + + tm.assert_frame_equal(result, exp) + assert result.index.names == exp.index.names + + def test_concat_copy_index_series(self, axis, using_copy_on_write): + # GH 29879 + ser = Series([1, 2]) + comb = concat([ser, ser], axis=axis, copy=True) + if not using_copy_on_write or axis in [0, "index"]: + assert comb.index is not ser.index + else: + assert comb.index is ser.index + + def test_concat_copy_index_frame(self, axis, using_copy_on_write): + # GH 29879 + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) + comb = concat([df, df], axis=axis, copy=True) + if not using_copy_on_write: + assert not comb.index.is_(df.index) + assert not comb.columns.is_(df.columns) + elif axis in [0, "index"]: + assert not comb.index.is_(df.index) + assert comb.columns.is_(df.columns) + elif axis in [1, "columns"]: + assert comb.index.is_(df.index) + assert not comb.columns.is_(df.columns) + + def test_default_index(self): + # is_series and ignore_index + s1 = Series([1, 2, 3], name="x") + s2 = Series([4, 5, 6], name="y") + res = concat([s1, s2], axis=1, ignore_index=True) + assert 
isinstance(res.columns, pd.RangeIndex) + exp = DataFrame([[1, 4], [2, 5], [3, 6]]) + # use check_index_type=True to check that the result has + # a RangeIndex (default index) + tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) + + # is_series and all inputs have no names + s1 = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + res = concat([s1, s2], axis=1, ignore_index=False) + assert isinstance(res.columns, pd.RangeIndex) + exp = DataFrame([[1, 4], [2, 5], [3, 6]]) + exp.columns = pd.RangeIndex(2) + tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) + + # is_dataframe and ignore_index + df1 = DataFrame({"A": [1, 2], "B": [5, 6]}) + df2 = DataFrame({"A": [3, 4], "B": [7, 8]}) + + res = concat([df1, df2], axis=0, ignore_index=True) + exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"]) + tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) + + res = concat([df1, df2], axis=1, ignore_index=True) + exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]]) + tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) + + def test_dups_index(self): + # GH 4771 + + # single dtypes + df = DataFrame( + np.random.default_rng(2).integers(0, 10, size=40).reshape(10, 4), + columns=["A", "A", "C", "C"], + ) + + result = concat([df, df], axis=1) + tm.assert_frame_equal(result.iloc[:, :4], df) + tm.assert_frame_equal(result.iloc[:, 4:], df) + + result = concat([df, df], axis=0) + tm.assert_frame_equal(result.iloc[:10], df) + tm.assert_frame_equal(result.iloc[10:], df) + + # multi dtypes + df = concat( + [ + DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "A", "B", "B"], + ), + DataFrame( + np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2), + columns=["A", "C"], + ), + ], + axis=1, + ) + + result = concat([df, df], axis=1) + tm.assert_frame_equal(result.iloc[:, :6], df) + tm.assert_frame_equal(result.iloc[:, 6:], df) + + result = concat([df, df], axis=0) + tm.assert_frame_equal(result.iloc[:10], df) + tm.assert_frame_equal(result.iloc[10:], df) + + # append + result = df.iloc[0:8, :]._append(df.iloc[8:]) + tm.assert_frame_equal(result, df) + + result = df.iloc[0:8, :]._append(df.iloc[8:9])._append(df.iloc[9:10]) + tm.assert_frame_equal(result, df) + + expected = concat([df, df], axis=0) + result = df._append(df) + tm.assert_frame_equal(result, expected) + + +class TestMultiIndexConcat: + def test_concat_multiindex_with_keys(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + index = frame.index + result = concat([frame, frame], keys=[0, 1], names=["iteration"]) + + assert result.index.names == ("iteration",) + index.names + tm.assert_frame_equal(result.loc[0], frame) + tm.assert_frame_equal(result.loc[1], frame) + assert result.index.nlevels == 3 + + def test_concat_multiindex_with_none_in_index_names(self): + # GH 15787 + index = MultiIndex.from_product([[1], range(5)], names=["level1", None]) + df = DataFrame({"col": range(5)}, index=index, dtype=np.int32) + + result = concat([df, df], keys=[1, 2], names=["level2"]) + index = MultiIndex.from_product( + [[1, 2], [1], range(5)], names=["level2", "level1", None] + ) + expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32) + tm.assert_frame_equal(result, expected) + + result = concat([df, df[:2]], keys=[1, 2], names=["level2"]) + level2 = [1] * 5 + [2] * 2 + level1 = [1] * 7 + no_name = list(range(5)) + list(range(2)) + tuples = list(zip(level2, level1, 
no_name)) + index = MultiIndex.from_tuples(tuples, names=["level2", "level1", None]) + expected = DataFrame({"col": no_name}, index=index, dtype=np.int32) + tm.assert_frame_equal(result, expected) + + def test_concat_multiindex_rangeindex(self): + # GH13542 + # when multi-index levels are RangeIndex objects + # there is a bug in concat with objects of len 1 + + df = DataFrame(np.random.default_rng(2).standard_normal((9, 2))) + df.index = MultiIndex( + levels=[pd.RangeIndex(3), pd.RangeIndex(3)], + codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)], + ) + + res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]]) + exp = df.iloc[[2, 3, 4, 5], :] + tm.assert_frame_equal(res, exp) + + def test_concat_multiindex_dfs_with_deepcopy(self): + # GH 9967 + example_multiindex1 = MultiIndex.from_product([["a"], ["b"]]) + example_dataframe1 = DataFrame([0], index=example_multiindex1) + + example_multiindex2 = MultiIndex.from_product([["a"], ["c"]]) + example_dataframe2 = DataFrame([1], index=example_multiindex2) + + example_dict = {"s1": example_dataframe1, "s2": example_dataframe2} + expected_index = MultiIndex( + levels=[["s1", "s2"], ["a"], ["b", "c"]], + codes=[[0, 1], [0, 0], [0, 1]], + names=["testname", None, None], + ) + expected = DataFrame([[0], [1]], index=expected_index) + result_copy = concat(deepcopy(example_dict), names=["testname"]) + tm.assert_frame_equal(result_copy, expected) + result_no_copy = concat(example_dict, names=["testname"]) + tm.assert_frame_equal(result_no_copy, expected) + + @pytest.mark.parametrize( + "mi1_list", + [ + [["a"], range(2)], + [["b"], np.arange(2.0, 4.0)], + [["c"], ["A", "B"]], + [["d"], pd.date_range(start="2017", end="2018", periods=2)], + ], + ) + @pytest.mark.parametrize( + "mi2_list", + [ + [["a"], range(2)], + [["b"], np.arange(2.0, 4.0)], + [["c"], ["A", "B"]], + [["d"], pd.date_range(start="2017", end="2018", periods=2)], + ], + ) + def test_concat_with_various_multiindex_dtypes( + self, mi1_list: list, mi2_list: list + ): + # GitHub #23478 + mi1 = MultiIndex.from_product(mi1_list) + mi2 = MultiIndex.from_product(mi2_list) + + df1 = DataFrame(np.zeros((1, len(mi1))), columns=mi1) + df2 = DataFrame(np.zeros((1, len(mi2))), columns=mi2) + + if mi1_list[0] == mi2_list[0]: + expected_mi = MultiIndex( + levels=[mi1_list[0], list(mi1_list[1])], + codes=[[0, 0, 0, 0], [0, 1, 0, 1]], + ) + else: + expected_mi = MultiIndex( + levels=[ + mi1_list[0] + mi2_list[0], + list(mi1_list[1]) + list(mi2_list[1]), + ], + codes=[[0, 0, 1, 1], [0, 1, 2, 3]], + ) + + expected_df = DataFrame(np.zeros((1, len(expected_mi))), columns=expected_mi) + + with tm.assert_produces_warning(None): + result_df = concat((df1, df2), axis=1) + + tm.assert_frame_equal(expected_df, result_df) + + def test_concat_multiindex_(self): + # GitHub #44786 + df = DataFrame({"col": ["a", "b", "c"]}, index=["1", "2", "2"]) + df = concat([df], keys=["X"]) + + iterables = [["X"], ["1", "2", "2"]] + result_index = df.index + expected_index = MultiIndex.from_product(iterables) + + tm.assert_index_equal(result_index, expected_index) + + result_df = df + expected_df = DataFrame( + {"col": ["a", "b", "c"]}, index=MultiIndex.from_product(iterables) + ) + tm.assert_frame_equal(result_df, expected_df) + + def test_concat_with_key_not_unique(self): + # GitHub #46519 + df1 = DataFrame({"name": [1]}) + df2 = DataFrame({"name": [2]}) + df3 = DataFrame({"name": [3]}) + df_a = concat([df1, df2, df3], keys=["x", "y", "x"]) + # the warning is caused by indexing unsorted multi-index + with 
tm.assert_produces_warning( + PerformanceWarning, match="indexing past lexsort depth" + ): + out_a = df_a.loc[("x", 0), :] + + df_b = DataFrame( + {"name": [1, 2, 3]}, index=Index([("x", 0), ("y", 0), ("x", 0)]) + ) + with tm.assert_produces_warning( + PerformanceWarning, match="indexing past lexsort depth" + ): + out_b = df_b.loc[("x", 0)] + + tm.assert_frame_equal(out_a, out_b) + + df1 = DataFrame({"name": ["a", "a", "b"]}) + df2 = DataFrame({"name": ["a", "b"]}) + df3 = DataFrame({"name": ["c", "d"]}) + df_a = concat([df1, df2, df3], keys=["x", "y", "x"]) + with tm.assert_produces_warning( + PerformanceWarning, match="indexing past lexsort depth" + ): + out_a = df_a.loc[("x", 0), :] + + df_b = DataFrame( + { + "a": ["x", "x", "x", "y", "y", "x", "x"], + "b": [0, 1, 2, 0, 1, 0, 1], + "name": list("aababcd"), + } + ).set_index(["a", "b"]) + df_b.index.names = [None, None] + with tm.assert_produces_warning( + PerformanceWarning, match="indexing past lexsort depth" + ): + out_b = df_b.loc[("x", 0), :] + + tm.assert_frame_equal(out_a, out_b) + + def test_concat_with_duplicated_levels(self): + # keyword levels should be unique + df1 = DataFrame({"A": [1]}, index=["x"]) + df2 = DataFrame({"A": [1]}, index=["y"]) + msg = r"Level values not unique: \['x', 'y', 'y'\]" + with pytest.raises(ValueError, match=msg): + concat([df1, df2], keys=["x", "y"], levels=[["x", "y", "y"]]) + + @pytest.mark.parametrize("levels", [[["x", "y"]], [["x", "y", "y"]]]) + def test_concat_with_levels_with_none_keys(self, levels): + df1 = DataFrame({"A": [1]}, index=["x"]) + df2 = DataFrame({"A": [1]}, index=["y"]) + msg = "levels supported only when keys is not None" + with pytest.raises(ValueError, match=msg): + concat([df1, df2], levels=levels) + + def test_concat_range_index_result(self): + # GH#47501 + df1 = DataFrame({"a": [1, 2]}) + df2 = DataFrame({"b": [1, 2]}) + + result = concat([df1, df2], sort=True, axis=1) + expected = DataFrame({"a": [1, 2], "b": [1, 2]}) + tm.assert_frame_equal(result, expected) + expected_index = pd.RangeIndex(0, 2) + tm.assert_index_equal(result.index, expected_index, exact=True) + + def test_concat_index_keep_dtype(self): + # GH#47329 + df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype="object")) + df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="object")) + result = concat([df1, df2], ignore_index=True, join="outer", sort=True) + expected = DataFrame( + [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="object") + ) + tm.assert_frame_equal(result, expected) + + def test_concat_index_keep_dtype_ea_numeric(self, any_numeric_ea_dtype): + # GH#47329 + df1 = DataFrame( + [[0, 1, 1]], columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype) + ) + df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=any_numeric_ea_dtype)) + result = concat([df1, df2], ignore_index=True, join="outer", sort=True) + expected = DataFrame( + [[0, 1, 1.0], [0, 1, np.nan]], + columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["Int8", "Int16", "Int32"]) + def test_concat_index_find_common(self, dtype): + # GH#47329 + df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype)) + df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="Int32")) + result = concat([df1, df2], ignore_index=True, join="outer", sort=True) + expected = DataFrame( + [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="Int32") + ) + tm.assert_frame_equal(result, expected) + + def 
test_concat_axis_1_sort_false_rangeindex(self, using_infer_string): + # GH 46675 + s1 = Series(["a", "b", "c"]) + s2 = Series(["a", "b"]) + s3 = Series(["a", "b", "c", "d"]) + s4 = Series( + [], dtype=object if not using_infer_string else "string[pyarrow_numpy]" + ) + result = concat( + [s1, s2, s3, s4], sort=False, join="outer", ignore_index=False, axis=1 + ) + expected = DataFrame( + [ + ["a"] * 3 + [np.nan], + ["b"] * 3 + [np.nan], + ["c", np.nan] * 2, + [np.nan] * 2 + ["d"] + [np.nan], + ], + dtype=object if not using_infer_string else "string[pyarrow_numpy]", + ) + tm.assert_frame_equal( + result, expected, check_index_type=True, check_column_type=True + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py new file mode 100644 index 0000000000000000000000000000000000000000..b1c44fc0debc397659354a3fa26c9ab6d33b8b40 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py @@ -0,0 +1,54 @@ +from io import StringIO + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + concat, + read_csv, +) +import pandas._testing as tm + + +class TestInvalidConcat: + @pytest.mark.parametrize("obj", [1, {}, [1, 2], (1, 2)]) + def test_concat_invalid(self, obj): + # trying to concat a ndframe with a non-ndframe + df1 = DataFrame(range(2)) + msg = ( + f"cannot concatenate object of type '{type(obj)}'; " + "only Series and DataFrame objs are valid" + ) + with pytest.raises(TypeError, match=msg): + concat([df1, obj]) + + def test_concat_invalid_first_argument(self): + df1 = DataFrame(range(2)) + msg = ( + "first argument must be an iterable of pandas " + 'objects, you passed an object of type "DataFrame"' + ) + with pytest.raises(TypeError, match=msg): + concat(df1) + + def test_concat_generator_obj(self): + # generator ok though + concat(DataFrame(np.random.default_rng(2).random((5, 5))) for _ in range(3)) + + def test_concat_textreader_obj(self): + # text reader ok + # GH6583 + data = """index,A,B,C,D + foo,2,3,4,5 + bar,7,8,9,10 + baz,12,13,14,15 + qux,12,13,14,15 + foo2,12,13,14,15 + bar2,12,13,14,15 + """ + + with read_csv(StringIO(data), chunksize=1) as reader: + result = concat(reader, ignore_index=True) + expected = read_csv(StringIO(data)) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py new file mode 100644 index 0000000000000000000000000000000000000000..c12b835cb61e1545d3fc5a61f16f195166edd1fd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py @@ -0,0 +1,175 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + concat, + date_range, +) +import pandas._testing as tm + + +class TestSeriesConcat: + def test_concat_series(self): + ts = Series( + np.arange(20, dtype=np.float64), + index=date_range("2020-01-01", periods=20), + name="foo", + ) + ts.name = "foo" + + pieces = [ts[:5], ts[5:15], ts[15:]] + + result = concat(pieces) + tm.assert_series_equal(result, ts) + assert result.name == ts.name + + result = concat(pieces, keys=[0, 1, 2]) + expected = ts.copy() + + ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]")) + + exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), 
np.arange(len(ts))] + exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes) + expected.index = exp_index + tm.assert_series_equal(result, expected) + + def test_concat_empty_and_non_empty_series_regression(self): + # GH 18187 regression test + s1 = Series([1]) + s2 = Series([], dtype=object) + + expected = s1 + msg = "The behavior of array concatenation with empty entries is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([s1, s2]) + tm.assert_series_equal(result, expected) + + def test_concat_series_axis1(self): + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + + pieces = [ts[:-2], ts[2:], ts[2:-2]] + + result = concat(pieces, axis=1) + expected = DataFrame(pieces).T + tm.assert_frame_equal(result, expected) + + result = concat(pieces, keys=["A", "B", "C"], axis=1) + expected = DataFrame(pieces, index=["A", "B", "C"]).T + tm.assert_frame_equal(result, expected) + + def test_concat_series_axis1_preserves_series_names(self): + # preserve series names, #2489 + s = Series(np.random.default_rng(2).standard_normal(5), name="A") + s2 = Series(np.random.default_rng(2).standard_normal(5), name="B") + + result = concat([s, s2], axis=1) + expected = DataFrame({"A": s, "B": s2}) + tm.assert_frame_equal(result, expected) + + s2.name = None + result = concat([s, s2], axis=1) + tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object")) + + def test_concat_series_axis1_with_reindex(self, sort): + # must reindex, #2603 + s = Series( + np.random.default_rng(2).standard_normal(3), index=["c", "a", "b"], name="A" + ) + s2 = Series( + np.random.default_rng(2).standard_normal(4), + index=["d", "a", "b", "c"], + name="B", + ) + result = concat([s, s2], axis=1, sort=sort) + expected = DataFrame({"A": s, "B": s2}, index=["c", "a", "b", "d"]) + if sort: + expected = expected.sort_index() + tm.assert_frame_equal(result, expected) + + def test_concat_series_axis1_names_applied(self): + # ensure names argument is not ignored on axis=1, #23490 + s = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"]) + expected = DataFrame( + [[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A") + ) + tm.assert_frame_equal(result, expected) + + result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"]) + expected = DataFrame( + [[1, 4], [2, 5], [3, 6]], + columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]), + ) + tm.assert_frame_equal(result, expected) + + def test_concat_series_axis1_same_names_ignore_index(self): + dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1] + s1 = Series( + np.random.default_rng(2).standard_normal(len(dates)), + index=dates, + name="value", + ) + s2 = Series( + np.random.default_rng(2).standard_normal(len(dates)), + index=dates, + name="value", + ) + + result = concat([s1, s2], axis=1, ignore_index=True) + expected = Index(range(2)) + + tm.assert_index_equal(result.columns, expected, exact=True) + + @pytest.mark.parametrize( + "s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))] + ) + def test_concat_series_name_npscalar_tuple(self, s1name, s2name): + # GH21015 + s1 = Series({"a": 1, "b": 2}, name=s1name) + s2 = Series({"c": 5, "d": 6}, name=s2name) + result = concat([s1, s2]) + expected = Series({"a": 1, "b": 2, "c": 5, "d": 6}) + tm.assert_series_equal(result, expected) + + def test_concat_series_partial_columns_names(self): + # GH10698 + named_series = Series([1, 
2], name="foo") + unnamed_series1 = Series([1, 2]) + unnamed_series2 = Series([4, 5]) + + result = concat([named_series, unnamed_series1, unnamed_series2], axis=1) + expected = DataFrame( + {"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1] + ) + tm.assert_frame_equal(result, expected) + + result = concat( + [named_series, unnamed_series1, unnamed_series2], + axis=1, + keys=["red", "blue", "yellow"], + ) + expected = DataFrame( + {"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]}, + columns=["red", "blue", "yellow"], + ) + tm.assert_frame_equal(result, expected) + + result = concat( + [named_series, unnamed_series1, unnamed_series2], axis=1, ignore_index=True + ) + expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]}) + tm.assert_frame_equal(result, expected) + + def test_concat_series_length_one_reversed(self, frame_or_series): + # GH39401 + obj = frame_or_series([100]) + result = concat([obj.iloc[::-1]]) + tm.assert_equal(result, obj) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_sort.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_sort.py new file mode 100644 index 0000000000000000000000000000000000000000..2724f819588933cc307dd396fdcd024f04c38eaa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_sort.py @@ -0,0 +1,118 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm + + +class TestConcatSort: + def test_concat_sorts_columns(self, sort): + # GH-4588 + df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"]) + df2 = DataFrame({"a": [3, 4], "c": [5, 6]}) + + # for sort=True/None + expected = DataFrame( + {"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]}, + columns=["a", "b", "c"], + ) + + if sort is False: + expected = expected[["b", "a", "c"]] + + # default + with tm.assert_produces_warning(None): + result = pd.concat([df1, df2], ignore_index=True, sort=sort) + tm.assert_frame_equal(result, expected) + + def test_concat_sorts_index(self, sort): + df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"]) + df2 = DataFrame({"b": [1, 2]}, index=["a", "b"]) + + # For True/None + expected = DataFrame( + {"a": [2, 3, 1], "b": [1, 2, None]}, + index=["a", "b", "c"], + columns=["a", "b"], + ) + if sort is False: + expected = expected.loc[["c", "a", "b"]] + + # Warn and sort by default + with tm.assert_produces_warning(None): + result = pd.concat([df1, df2], axis=1, sort=sort) + tm.assert_frame_equal(result, expected) + + def test_concat_inner_sort(self, sort): + # https://github.com/pandas-dev/pandas/pull/20613 + df1 = DataFrame( + {"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"] + ) + df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4]) + + with tm.assert_produces_warning(None): + # unset sort should *not* warn for inner join + # since that never sorted + result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True) + + expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"]) + if sort is True: + expected = expected[["a", "b"]] + tm.assert_frame_equal(result, expected) + + def test_concat_aligned_sort(self): + # GH-4588 + df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"]) + result = pd.concat([df, df], sort=True, ignore_index=True) + expected = DataFrame( + {"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]}, + columns=["a", "b", "c"], + ) + tm.assert_frame_equal(result, expected) + 
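# an inner concat keeps only the columns shared by every input; sort=True then returns them in lexical order ("b", "c") +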
+ result = pd.concat( + [df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True + ) + expected = expected[["b", "c"]] + tm.assert_frame_equal(result, expected) + + def test_concat_aligned_sort_does_not_raise(self): + # GH-4588 + # We catch TypeErrors from sorting internally and do not re-raise. + df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"]) + expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"]) + result = pd.concat([df, df], ignore_index=True, sort=True) + tm.assert_frame_equal(result, expected) + + def test_concat_frame_with_sort_false(self): + # GH 43375 + result = pd.concat( + [DataFrame({i: i}, index=[i]) for i in range(2, 0, -1)], sort=False + ) + expected = DataFrame([[2, np.nan], [np.nan, 1]], index=[2, 1], columns=[2, 1]) + + tm.assert_frame_equal(result, expected) + + # GH 37937 + df1 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[1, 2, 3]) + df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}, index=[3, 1, 6]) + result = pd.concat([df2, df1], axis=1, sort=False) + expected = DataFrame( + [ + [7.0, 10.0, 3.0, 6.0], + [8.0, 11.0, 1.0, 4.0], + [9.0, 12.0, np.nan, np.nan], + [np.nan, np.nan, 2.0, 5.0], + ], + index=[3, 1, 6, 2], + columns=["c", "d", "a", "b"], + ) + tm.assert_frame_equal(result, expected) + + def test_concat_sort_none_raises(self): + # GH#41518 + df = DataFrame({1: [1, 2], "a": [3, 4]}) + msg = "The 'sort' keyword only accepts boolean values; None was passed." + with pytest.raises(ValueError, match=msg): + pd.concat([df, df], sort=None) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..293c040df202a706a08135a9799af532c0ea0ead Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_join.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_join.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed7a48b617e663cd1fd4a8c7f49efb26fde7a385 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_join.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..385f2678f868f00d5ca08b8a9c470129ceb80e3a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_asof.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_asof.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9498ada3f6b3cd5d4e604bda3ef71f3c2bfd96ac Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_asof.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_cross.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_cross.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e61069e949887697054b868dce37f56a483fb6f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_cross.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_index_as_string.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_index_as_string.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f21fe3276ccb9b9f4630a8d0986cd9b011e1483b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_index_as_string.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_ordered.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_ordered.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c35e186cf713f49f3cf572e02ee76b6fa2cd0ec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_merge_ordered.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_multi.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_multi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..763f0a61938e604073bfac8356cf6d72c7fbfddf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/__pycache__/test_multi.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_join.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_join.py new file mode 100644 index 0000000000000000000000000000000000000000..db5a0437a14f0c67effe8351930fb5d39f4fa514 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_join.py @@ -0,0 +1,1101 @@ +import re + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Index, + MultiIndex, + Series, + Timestamp, + bdate_range, + concat, + merge, + option_context, +) +import pandas._testing as tm + + +def get_test_data(ngroups=8, n=50): + unique_groups = list(range(ngroups)) + arr = np.asarray(np.tile(unique_groups, n // ngroups)) + + if len(arr) < n: + arr = np.asarray(list(arr) + unique_groups[: n - len(arr)]) + + np.random.default_rng(2).shuffle(arr) + return arr + + +class TestJoin: + # aggregate multiple columns + @pytest.fixture + def df(self): + df = DataFrame( + { + "key1": get_test_data(), + "key2": get_test_data(), + "data1": np.random.default_rng(2).standard_normal(50), + "data2": np.random.default_rng(2).standard_normal(50), + } + ) + + # exclude a couple keys for fun + df = 
df[df["key2"] > 1] + return df + + @pytest.fixture + def df2(self): + return DataFrame( + { + "key1": get_test_data(n=10), + "key2": get_test_data(ngroups=4, n=10), + "value": np.random.default_rng(2).standard_normal(10), + } + ) + + @pytest.fixture + def target_source(self): + data = { + "A": [0.0, 1.0, 2.0, 3.0, 4.0], + "B": [0.0, 1.0, 0.0, 1.0, 0.0], + "C": ["foo1", "foo2", "foo3", "foo4", "foo5"], + "D": bdate_range("1/1/2009", periods=5), + } + target = DataFrame(data, index=Index(["a", "b", "c", "d", "e"], dtype=object)) + + # Join on string value + + source = DataFrame( + {"MergedA": data["A"], "MergedD": data["D"]}, index=data["C"] + ) + return target, source + + def test_left_outer_join(self, df, df2): + joined_key2 = merge(df, df2, on="key2") + _check_join(df, df2, joined_key2, ["key2"], how="left") + + joined_both = merge(df, df2) + _check_join(df, df2, joined_both, ["key1", "key2"], how="left") + + def test_right_outer_join(self, df, df2): + joined_key2 = merge(df, df2, on="key2", how="right") + _check_join(df, df2, joined_key2, ["key2"], how="right") + + joined_both = merge(df, df2, how="right") + _check_join(df, df2, joined_both, ["key1", "key2"], how="right") + + def test_full_outer_join(self, df, df2): + joined_key2 = merge(df, df2, on="key2", how="outer") + _check_join(df, df2, joined_key2, ["key2"], how="outer") + + joined_both = merge(df, df2, how="outer") + _check_join(df, df2, joined_both, ["key1", "key2"], how="outer") + + def test_inner_join(self, df, df2): + joined_key2 = merge(df, df2, on="key2", how="inner") + _check_join(df, df2, joined_key2, ["key2"], how="inner") + + joined_both = merge(df, df2, how="inner") + _check_join(df, df2, joined_both, ["key1", "key2"], how="inner") + + def test_handle_overlap(self, df, df2): + joined = merge(df, df2, on="key2", suffixes=(".foo", ".bar")) + + assert "key1.foo" in joined + assert "key1.bar" in joined + + def test_handle_overlap_arbitrary_key(self, df, df2): + joined = merge( + df, + df2, + left_on="key2", + right_on="key1", + suffixes=(".foo", ".bar"), + ) + assert "key1.foo" in joined + assert "key2.bar" in joined + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + def test_join_on(self, target_source, infer_string): + target, source = target_source + + merged = target.join(source, on="C") + tm.assert_series_equal(merged["MergedA"], target["A"], check_names=False) + tm.assert_series_equal(merged["MergedD"], target["D"], check_names=False) + + # join with duplicates (fix regression from DataFrame/Matrix merge) + df = DataFrame({"key": ["a", "a", "b", "b", "c"]}) + df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"]) + joined = df.join(df2, on="key") + expected = DataFrame( + {"key": ["a", "a", "b", "b", "c"], "value": [0, 0, 1, 1, 2]} + ) + tm.assert_frame_equal(joined, expected) + + # Test when some are missing + df_a = DataFrame([[1], [2], [3]], index=["a", "b", "c"], columns=["one"]) + df_b = DataFrame([["foo"], ["bar"]], index=[1, 2], columns=["two"]) + df_c = DataFrame([[1], [2]], index=[1, 2], columns=["three"]) + joined = df_a.join(df_b, on="one") + joined = joined.join(df_c, on="one") + assert np.isnan(joined["two"]["c"]) + assert np.isnan(joined["three"]["c"]) + + # merge column not present + with pytest.raises(KeyError, match="^'E'$"): + target.join(source, on="E") + + # overlap + source_copy = source.copy() + msg = ( + "You are trying to merge on float64 and object|string columns for key " + "'A'. 
If you wish to proceed you should use pd.concat" + ) + with pytest.raises(ValueError, match=msg): + target.join(source_copy, on="A") + + def test_join_on_fails_with_different_right_index(self): + df = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=3), + "b": np.random.default_rng(2).standard_normal(3), + } + ) + df2 = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=10), + "b": np.random.default_rng(2).standard_normal(10), + }, + index=MultiIndex.from_product([range(5), ["A", "B"]]), + ) + msg = r'len\(left_on\) must equal the number of levels in the index of "right"' + with pytest.raises(ValueError, match=msg): + merge(df, df2, left_on="a", right_index=True) + + def test_join_on_fails_with_different_left_index(self): + df = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=3), + "b": np.random.default_rng(2).standard_normal(3), + }, + index=MultiIndex.from_arrays([range(3), list("abc")]), + ) + df2 = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + msg = r'len\(right_on\) must equal the number of levels in the index of "left"' + with pytest.raises(ValueError, match=msg): + merge(df, df2, right_on="b", left_index=True) + + def test_join_on_fails_with_different_column_counts(self): + df = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=3), + "b": np.random.default_rng(2).standard_normal(3), + } + ) + df2 = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=10), + "b": np.random.default_rng(2).standard_normal(10), + }, + index=MultiIndex.from_product([range(5), ["A", "B"]]), + ) + msg = r"len\(right_on\) must equal len\(left_on\)" + with pytest.raises(ValueError, match=msg): + merge(df, df2, right_on="a", left_on=["a", "b"]) + + @pytest.mark.parametrize("wrong_type", [2, "str", None, np.array([0, 1])]) + def test_join_on_fails_with_wrong_object_type(self, wrong_type): + # GH12081 - original issue + + # GH21220 - merging of Series and DataFrame is now allowed + # Edited test to remove the Series object from test parameters + + df = DataFrame({"a": [1, 1]}) + msg = ( + "Can only merge Series or DataFrame objects, " + f"a {type(wrong_type)} was passed" + ) + with pytest.raises(TypeError, match=msg): + merge(wrong_type, df, left_on="a", right_on="a") + with pytest.raises(TypeError, match=msg): + merge(df, wrong_type, left_on="a", right_on="a") + + def test_join_on_pass_vector(self, target_source): + target, source = target_source + expected = target.join(source, on="C") + expected = expected.rename(columns={"C": "key_0"}) + expected = expected[["key_0", "A", "B", "D", "MergedA", "MergedD"]] + + join_col = target.pop("C") + result = target.join(source, on=join_col) + tm.assert_frame_equal(result, expected) + + def test_join_with_len0(self, target_source): + # nothing to merge + target, source = target_source + merged = target.join(source.reindex([]), on="C") + for col in source: + assert col in merged + assert merged[col].isna().all() + + merged2 = target.join(source.reindex([]), on="C", how="inner") + tm.assert_index_equal(merged2.columns, merged.columns) + assert len(merged2) == 0 + + def test_join_on_inner(self): + df = DataFrame({"key": ["a", "a", "d", "b", "b", "c"]}) + df2 = DataFrame({"value": [0, 1]}, index=["a", "b"]) + + joined = df.join(df2, on="key", how="inner") + + expected = df.join(df2, on="key") + expected = expected[expected["value"].notna()] + tm.assert_series_equal(joined["key"], 
expected["key"]) + tm.assert_series_equal(joined["value"], expected["value"], check_dtype=False) + tm.assert_index_equal(joined.index, expected.index) + + def test_join_on_singlekey_list(self): + df = DataFrame({"key": ["a", "a", "b", "b", "c"]}) + df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"]) + + # corner cases + joined = df.join(df2, on=["key"]) + expected = df.join(df2, on="key") + + tm.assert_frame_equal(joined, expected) + + def test_join_on_series(self, target_source): + target, source = target_source + result = target.join(source["MergedA"], on="C") + expected = target.join(source[["MergedA"]], on="C") + tm.assert_frame_equal(result, expected) + + def test_join_on_series_buglet(self): + # GH #638 + df = DataFrame({"a": [1, 1]}) + ds = Series([2], index=[1], name="b") + result = df.join(ds, on="a") + expected = DataFrame({"a": [1, 1], "b": [2, 2]}, index=df.index) + tm.assert_frame_equal(result, expected) + + def test_join_index_mixed(self, join_type): + # no overlapping blocks + df1 = DataFrame(index=np.arange(10)) + df1["bool"] = True + df1["string"] = "foo" + + df2 = DataFrame(index=np.arange(5, 15)) + df2["int"] = 1 + df2["float"] = 1.0 + + joined = df1.join(df2, how=join_type) + expected = _join_by_hand(df1, df2, how=join_type) + tm.assert_frame_equal(joined, expected) + + joined = df2.join(df1, how=join_type) + expected = _join_by_hand(df2, df1, how=join_type) + tm.assert_frame_equal(joined, expected) + + def test_join_index_mixed_overlap(self): + df1 = DataFrame( + {"A": 1.0, "B": 2, "C": "foo", "D": True}, + index=np.arange(10), + columns=["A", "B", "C", "D"], + ) + assert df1["B"].dtype == np.int64 + assert df1["D"].dtype == np.bool_ + + df2 = DataFrame( + {"A": 1.0, "B": 2, "C": "foo", "D": True}, + index=np.arange(0, 10, 2), + columns=["A", "B", "C", "D"], + ) + + # overlap + joined = df1.join(df2, lsuffix="_one", rsuffix="_two") + expected_columns = [ + "A_one", + "B_one", + "C_one", + "D_one", + "A_two", + "B_two", + "C_two", + "D_two", + ] + df1.columns = expected_columns[:4] + df2.columns = expected_columns[4:] + expected = _join_by_hand(df1, df2) + tm.assert_frame_equal(joined, expected) + + def test_join_empty_bug(self): + # generated an exception in 0.4.3 + x = DataFrame() + x.join(DataFrame([3], index=[0], columns=["A"]), how="outer") + + def test_join_unconsolidated(self): + # GH #331 + a = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), columns=["a", "b"] + ) + c = Series(np.random.default_rng(2).standard_normal(30)) + a["c"] = c + d = DataFrame(np.random.default_rng(2).standard_normal((30, 1)), columns=["q"]) + + # it works! 
+ a.join(d) + d.join(a) + + def test_join_multiindex(self): + index1 = MultiIndex.from_arrays( + [["a", "a", "a", "b", "b", "b"], [1, 2, 3, 1, 2, 3]], + names=["first", "second"], + ) + + index2 = MultiIndex.from_arrays( + [["b", "b", "b", "c", "c", "c"], [1, 2, 3, 1, 2, 3]], + names=["first", "second"], + ) + + df1 = DataFrame( + data=np.random.default_rng(2).standard_normal(6), + index=index1, + columns=["var X"], + ) + df2 = DataFrame( + data=np.random.default_rng(2).standard_normal(6), + index=index2, + columns=["var Y"], + ) + + df1 = df1.sort_index(level=0) + df2 = df2.sort_index(level=0) + + joined = df1.join(df2, how="outer") + ex_index = Index(index1.values).union(Index(index2.values)) + expected = df1.reindex(ex_index).join(df2.reindex(ex_index)) + expected.index.names = index1.names + tm.assert_frame_equal(joined, expected) + assert joined.index.names == index1.names + + df1 = df1.sort_index(level=1) + df2 = df2.sort_index(level=1) + + joined = df1.join(df2, how="outer").sort_index(level=0) + ex_index = Index(index1.values).union(Index(index2.values)) + expected = df1.reindex(ex_index).join(df2.reindex(ex_index)) + expected.index.names = index1.names + + tm.assert_frame_equal(joined, expected) + assert joined.index.names == index1.names + + def test_join_inner_multiindex(self, lexsorted_two_level_string_multiindex): + key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"] + key2 = [ + "two", + "one", + "three", + "one", + "two", + "one", + "two", + "two", + "three", + "one", + ] + + data = np.random.default_rng(2).standard_normal(len(key1)) + data = DataFrame({"key1": key1, "key2": key2, "data": data}) + + index = lexsorted_two_level_string_multiindex + to_join = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + index=index, + columns=["j_one", "j_two", "j_three"], + ) + + joined = data.join(to_join, on=["key1", "key2"], how="inner") + expected = merge( + data, + to_join.reset_index(), + left_on=["key1", "key2"], + right_on=["first", "second"], + how="inner", + sort=False, + ) + + expected2 = merge( + to_join, + data, + right_on=["key1", "key2"], + left_index=True, + how="inner", + sort=False, + ) + tm.assert_frame_equal(joined, expected2.reindex_like(joined)) + + expected2 = merge( + to_join, + data, + right_on=["key1", "key2"], + left_index=True, + how="inner", + sort=False, + ) + + expected = expected.drop(["first", "second"], axis=1) + expected.index = joined.index + + assert joined.index.is_monotonic_increasing + tm.assert_frame_equal(joined, expected) + + # _assert_same_contents(expected, expected2.loc[:, expected.columns]) + + def test_join_hierarchical_mixed_raises(self): + # GH 2024 + # GH 40993: For raising, enforced in 2.0 + df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "c"]) + new_df = df.groupby(["a"]).agg({"b": ["mean", "sum"]}) + other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"]) + other_df.set_index("a", inplace=True) + # GH 9455, 12219 + with pytest.raises( + pd.errors.MergeError, match="Not allowed to merge between different levels" + ): + merge(new_df, other_df, left_index=True, right_index=True) + + def test_join_float64_float32(self): + a = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + columns=["a", "b"], + dtype=np.float64, + ) + b = DataFrame( + np.random.default_rng(2).standard_normal((10, 1)), + columns=["c"], + dtype=np.float32, + ) + joined = a.join(b) + assert joined.dtypes["a"] == "float64" + assert joined.dtypes["b"] == "float64" + assert joined.dtypes["c"] 
== "float32" + + a = np.random.default_rng(2).integers(0, 5, 100).astype("int64") + b = np.random.default_rng(2).random(100).astype("float64") + c = np.random.default_rng(2).random(100).astype("float32") + df = DataFrame({"a": a, "b": b, "c": c}) + xpdf = DataFrame({"a": a, "b": b, "c": c}) + s = DataFrame( + np.random.default_rng(2).random(5).astype("float32"), columns=["md"] + ) + rs = df.merge(s, left_on="a", right_index=True) + assert rs.dtypes["a"] == "int64" + assert rs.dtypes["b"] == "float64" + assert rs.dtypes["c"] == "float32" + assert rs.dtypes["md"] == "float32" + + xp = xpdf.merge(s, left_on="a", right_index=True) + tm.assert_frame_equal(rs, xp) + + def test_join_many_non_unique_index(self): + df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]}) + df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]}) + df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]}) + idf1 = df1.set_index(["a", "b"]) + idf2 = df2.set_index(["a", "b"]) + idf3 = df3.set_index(["a", "b"]) + + result = idf1.join([idf2, idf3], how="outer") + + df_partially_merged = merge(df1, df2, on=["a", "b"], how="outer") + expected = merge(df_partially_merged, df3, on=["a", "b"], how="outer") + + result = result.reset_index() + expected = expected[result.columns] + expected["a"] = expected.a.astype("int64") + expected["b"] = expected.b.astype("int64") + tm.assert_frame_equal(result, expected) + + df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]}) + df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]}) + df3 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]}) + idf1 = df1.set_index(["a", "b"]) + idf2 = df2.set_index(["a", "b"]) + idf3 = df3.set_index(["a", "b"]) + result = idf1.join([idf2, idf3], how="inner") + + df_partially_merged = merge(df1, df2, on=["a", "b"], how="inner") + expected = merge(df_partially_merged, df3, on=["a", "b"], how="inner") + + result = result.reset_index() + + tm.assert_frame_equal(result, expected.loc[:, result.columns]) + + # GH 11519 + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + s = Series( + np.repeat(np.arange(8), 2), index=np.repeat(np.arange(8), 2), name="TEST" + ) + inner = df.join(s, how="inner") + outer = df.join(s, how="outer") + left = df.join(s, how="left") + right = df.join(s, how="right") + tm.assert_frame_equal(inner, outer) + tm.assert_frame_equal(inner, left) + tm.assert_frame_equal(inner, right) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + def test_join_sort(self, infer_string): + with option_context("future.infer_string", infer_string): + left = DataFrame( + {"key": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]} + ) + right = DataFrame({"value2": ["a", "b", "c"]}, index=["bar", "baz", "foo"]) + + joined = left.join(right, on="key", sort=True) + expected = DataFrame( + { + "key": ["bar", "baz", "foo", "foo"], + "value": [2, 3, 1, 4], + "value2": ["a", "b", "c", "c"], + }, + index=[1, 2, 0, 3], + ) + tm.assert_frame_equal(joined, expected) + + # smoke test + joined = left.join(right, on="key", sort=False) + tm.assert_index_equal(joined.index, Index(range(4)), exact=True) + + def test_join_mixed_non_unique_index(self): + # GH 12814, unorderable types in py3 with a non-unique index + df1 = DataFrame({"a": [1, 2, 
3, 4]}, index=[1, 2, 3, "a"]) + df2 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 3, 3, 4]) + result = df1.join(df2) + expected = DataFrame( + {"a": [1, 2, 3, 3, 4], "b": [5, np.nan, 6, 7, np.nan]}, + index=[1, 2, 3, 3, "a"], + ) + tm.assert_frame_equal(result, expected) + + df3 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 2, "a"]) + df4 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 2, 3, 4]) + result = df3.join(df4) + expected = DataFrame( + {"a": [1, 2, 3, 4], "b": [5, 6, 6, np.nan]}, index=[1, 2, 2, "a"] + ) + tm.assert_frame_equal(result, expected) + + def test_join_non_unique_period_index(self): + # GH #16871 + index = pd.period_range("2016-01-01", periods=16, freq="M") + df = DataFrame(list(range(len(index))), index=index, columns=["pnum"]) + df2 = concat([df, df]) + result = df.join(df2, how="inner", rsuffix="_df2") + expected = DataFrame( + np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2), + columns=["pnum", "pnum_df2"], + index=df2.sort_index().index, + ) + tm.assert_frame_equal(result, expected) + + def test_mixed_type_join_with_suffix(self): + # GH #916 + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 6)), + columns=["a", "b", "c", "d", "e", "f"], + ) + df.insert(0, "id", 0) + df.insert(5, "dt", "foo") + + grouped = df.groupby("id") + msg = re.escape("agg function failed [how->mean,dtype->") + with pytest.raises(TypeError, match=msg): + grouped.mean() + mn = grouped.mean(numeric_only=True) + cn = grouped.count() + + # it works! + mn.join(cn, rsuffix="_right") + + def test_join_many(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 6)), columns=list("abcdef") + ) + df_list = [df[["a", "b"]], df[["c", "d"]], df[["e", "f"]]] + + joined = df_list[0].join(df_list[1:]) + tm.assert_frame_equal(joined, df) + + df_list = [df[["a", "b"]][:-2], df[["c", "d"]][2:], df[["e", "f"]][1:9]] + + def _check_diff_index(df_list, result, exp_index): + reindexed = [x.reindex(exp_index) for x in df_list] + expected = reindexed[0].join(reindexed[1:]) + tm.assert_frame_equal(result, expected) + + # different join types + joined = df_list[0].join(df_list[1:], how="outer") + _check_diff_index(df_list, joined, df.index) + + joined = df_list[0].join(df_list[1:]) + _check_diff_index(df_list, joined, df_list[0].index) + + joined = df_list[0].join(df_list[1:], how="inner") + _check_diff_index(df_list, joined, df.index[2:8]) + + msg = "Joining multiple DataFrames only supported for joining on index" + with pytest.raises(ValueError, match=msg): + df_list[0].join(df_list[1:], on="a") + + def test_join_many_mixed(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), + columns=["A", "B", "C", "D"], + ) + df["key"] = ["foo", "bar"] * 4 + df1 = df.loc[:, ["A", "B"]] + df2 = df.loc[:, ["C", "D"]] + df3 = df.loc[:, ["key"]] + + result = df1.join([df2, df3]) + tm.assert_frame_equal(result, df) + + def test_join_dups(self): + # joining dups + df = concat( + [ + DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "A", "B", "B"], + ), + DataFrame( + np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2), + columns=["A", "C"], + ), + ], + axis=1, + ) + + expected = concat([df, df], axis=1) + result = df.join(df, rsuffix="_2") + result.columns = expected.columns + tm.assert_frame_equal(result, expected) + + # GH 4975, invalid join on dups + w = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"] + ) + x = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), columns=["x", 
"y"] + ) + y = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"] + ) + z = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"] + ) + + dta = x.merge(y, left_index=True, right_index=True).merge( + z, left_index=True, right_index=True, how="outer" + ) + # GH 40991: As of 2.0 causes duplicate columns + with pytest.raises( + pd.errors.MergeError, + match="Passing 'suffixes' which cause duplicate columns", + ): + dta.merge(w, left_index=True, right_index=True) + + def test_join_multi_to_multi(self, join_type): + # GH 20475 + leftindex = MultiIndex.from_product( + [list("abc"), list("xy"), [1, 2]], names=["abc", "xy", "num"] + ) + left = DataFrame({"v1": range(12)}, index=leftindex) + + rightindex = MultiIndex.from_product( + [list("abc"), list("xy")], names=["abc", "xy"] + ) + right = DataFrame({"v2": [100 * i for i in range(1, 7)]}, index=rightindex) + + result = left.join(right, on=["abc", "xy"], how=join_type) + expected = ( + left.reset_index() + .merge(right.reset_index(), on=["abc", "xy"], how=join_type) + .set_index(["abc", "xy", "num"]) + ) + tm.assert_frame_equal(expected, result) + + msg = r'len\(left_on\) must equal the number of levels in the index of "right"' + with pytest.raises(ValueError, match=msg): + left.join(right, on="xy", how=join_type) + + with pytest.raises(ValueError, match=msg): + right.join(left, on=["abc", "xy"], how=join_type) + + def test_join_on_tz_aware_datetimeindex(self): + # GH 23931, 26335 + df1 = DataFrame( + { + "date": pd.date_range( + start="2018-01-01", periods=5, tz="America/Chicago" + ), + "vals": list("abcde"), + } + ) + + df2 = DataFrame( + { + "date": pd.date_range( + start="2018-01-03", periods=5, tz="America/Chicago" + ), + "vals_2": list("tuvwx"), + } + ) + result = df1.join(df2.set_index("date"), on="date") + expected = df1.copy() + expected["vals_2"] = Series([np.nan] * 2 + list("tuv")) + tm.assert_frame_equal(result, expected) + + def test_join_datetime_string(self): + # GH 5647 + dfa = DataFrame( + [ + ["2012-08-02", "L", 10], + ["2012-08-02", "J", 15], + ["2013-04-06", "L", 20], + ["2013-04-06", "J", 25], + ], + columns=["x", "y", "a"], + ) + dfa["x"] = pd.to_datetime(dfa["x"]).astype("M8[ns]") + dfb = DataFrame( + [["2012-08-02", "J", 1], ["2013-04-06", "L", 2]], + columns=["x", "y", "z"], + index=[2, 4], + ) + dfb["x"] = pd.to_datetime(dfb["x"]).astype("M8[ns]") + result = dfb.join(dfa.set_index(["x", "y"]), on=["x", "y"]) + expected = DataFrame( + [ + [Timestamp("2012-08-02 00:00:00"), "J", 1, 15], + [Timestamp("2013-04-06 00:00:00"), "L", 2, 20], + ], + index=[2, 4], + columns=["x", "y", "z", "a"], + ) + expected["x"] = expected["x"].astype("M8[ns]") + tm.assert_frame_equal(result, expected) + + def test_join_with_categorical_index(self): + # GH47812 + ix = ["a", "b"] + id1 = pd.CategoricalIndex(ix, categories=ix) + id2 = pd.CategoricalIndex(reversed(ix), categories=reversed(ix)) + + df1 = DataFrame({"c1": ix}, index=id1) + df2 = DataFrame({"c2": reversed(ix)}, index=id2) + result = df1.join(df2) + expected = DataFrame( + {"c1": ["a", "b"], "c2": ["a", "b"]}, + index=pd.CategoricalIndex(["a", "b"], categories=["a", "b"]), + ) + tm.assert_frame_equal(result, expected) + + +def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix="_y"): + # some smoke tests + for c in join_col: + assert result[c].notna().all() + + left_grouped = left.groupby(join_col) + right_grouped = right.groupby(join_col) + + for group_key, group in result.groupby(join_col): + 
l_joined = _restrict_to_columns(group, left.columns, lsuffix) + r_joined = _restrict_to_columns(group, right.columns, rsuffix) + + try: + lgroup = left_grouped.get_group(group_key) + except KeyError as err: + if how in ("left", "inner"): + raise AssertionError( + f"key {group_key} should not have been in the join" + ) from err + + _assert_all_na(l_joined, left.columns, join_col) + else: + _assert_same_contents(l_joined, lgroup) + + try: + rgroup = right_grouped.get_group(group_key) + except KeyError as err: + if how in ("right", "inner"): + raise AssertionError( + f"key {group_key} should not have been in the join" + ) from err + + _assert_all_na(r_joined, right.columns, join_col) + else: + _assert_same_contents(r_joined, rgroup) + + +def _restrict_to_columns(group, columns, suffix): + found = [ + c for c in group.columns if c in columns or c.replace(suffix, "") in columns + ] + + # filter + group = group.loc[:, found] + + # get rid of suffixes, if any + group = group.rename(columns=lambda x: x.replace(suffix, "")) + + # put in the right order... + group = group.loc[:, columns] + + return group + + +def _assert_same_contents(join_chunk, source): + NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly... + + jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values + svalues = source.fillna(NA_SENTINEL).drop_duplicates().values + + rows = {tuple(row) for row in jvalues} + assert len(rows) == len(source) + assert all(tuple(row) in rows for row in svalues) + + +def _assert_all_na(join_chunk, source_columns, join_col): + for c in source_columns: + if c in join_col: + continue + assert join_chunk[c].isna().all() + + +def _join_by_hand(a, b, how="left"): + join_index = a.index.join(b.index, how=how) + + a_re = a.reindex(join_index) + b_re = b.reindex(join_index) + + result_columns = a.columns.append(b.columns) + + for col, s in b_re.items(): + a_re[col] = s + return a_re.reindex(columns=result_columns) + + +def test_join_inner_multiindex_deterministic_order(): + # GH: 36910 + left = DataFrame( + data={"e": 5}, + index=MultiIndex.from_tuples([(1, 2, 4)], names=("a", "b", "d")), + ) + right = DataFrame( + data={"f": 6}, index=MultiIndex.from_tuples([(2, 3)], names=("b", "c")) + ) + result = left.join(right, how="inner") + expected = DataFrame( + {"e": [5], "f": [6]}, + index=MultiIndex.from_tuples([(1, 2, 4, 3)], names=("a", "b", "d", "c")), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + ("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])] +) +def test_join_cross(input_col, output_cols): + # GH#5401 + left = DataFrame({"a": [1, 3]}) + right = DataFrame({input_col: [3, 4]}) + result = left.join(right, how="cross", lsuffix="_x", rsuffix="_y") + expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]}) + tm.assert_frame_equal(result, expected) + + +def test_join_multiindex_one_level(join_type): + # GH#36909 + left = DataFrame( + data={"c": 3}, index=MultiIndex.from_tuples([(1, 2)], names=("a", "b")) + ) + right = DataFrame(data={"d": 4}, index=MultiIndex.from_tuples([(2,)], names=("b",))) + result = left.join(right, how=join_type) + if join_type == "right": + expected = DataFrame( + {"c": [3], "d": [4]}, + index=MultiIndex.from_tuples([(2, 1)], names=["b", "a"]), + ) + else: + expected = DataFrame( + {"c": [3], "d": [4]}, + index=MultiIndex.from_tuples([(1, 2)], names=["a", "b"]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "categories, values", + [ + (["Y", "X"], ["Y", "X", 
"X"]), + ([2, 1], [2, 1, 1]), + ([2.5, 1.5], [2.5, 1.5, 1.5]), + ( + [Timestamp("2020-12-31"), Timestamp("2019-12-31")], + [Timestamp("2020-12-31"), Timestamp("2019-12-31"), Timestamp("2019-12-31")], + ), + ], +) +def test_join_multiindex_not_alphabetical_categorical(categories, values): + # GH#38502 + left = DataFrame( + { + "first": ["A", "A"], + "second": Categorical(categories, categories=categories), + "value": [1, 2], + } + ).set_index(["first", "second"]) + right = DataFrame( + { + "first": ["A", "A", "B"], + "second": Categorical(values, categories=categories), + "value": [3, 4, 5], + } + ).set_index(["first", "second"]) + result = left.join(right, lsuffix="_left", rsuffix="_right") + + expected = DataFrame( + { + "first": ["A", "A"], + "second": Categorical(categories, categories=categories), + "value_left": [1, 2], + "value_right": [3, 4], + } + ).set_index(["first", "second"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "left_empty, how, exp", + [ + (False, "left", "left"), + (False, "right", "empty"), + (False, "inner", "empty"), + (False, "outer", "left"), + (False, "cross", "empty"), + (True, "left", "empty"), + (True, "right", "right"), + (True, "inner", "empty"), + (True, "outer", "right"), + (True, "cross", "empty"), + ], +) +def test_join_empty(left_empty, how, exp): + left = DataFrame({"A": [2, 1], "B": [3, 4]}, dtype="int64").set_index("A") + right = DataFrame({"A": [1], "C": [5]}, dtype="int64").set_index("A") + + if left_empty: + left = left.head(0) + else: + right = right.head(0) + + result = left.join(right, how=how) + + if exp == "left": + expected = DataFrame({"A": [2, 1], "B": [3, 4], "C": [np.nan, np.nan]}) + expected = expected.set_index("A") + elif exp == "right": + expected = DataFrame({"B": [np.nan], "A": [1], "C": [5]}) + expected = expected.set_index("A") + elif exp == "empty": + expected = DataFrame(columns=["B", "C"], dtype="int64") + if how != "cross": + expected = expected.rename_axis("A") + if how == "outer": + expected = expected.sort_index() + + tm.assert_frame_equal(result, expected) + + +def test_join_empty_uncomparable_columns(): + # GH 57048 + df1 = DataFrame() + df2 = DataFrame(columns=["test"]) + df3 = DataFrame(columns=["foo", ("bar", "baz")]) + + result = df1 + df2 + expected = DataFrame(columns=["test"]) + tm.assert_frame_equal(result, expected) + + result = df2 + df3 + expected = DataFrame(columns=[("bar", "baz"), "foo", "test"]) + tm.assert_frame_equal(result, expected) + + result = df1 + df3 + expected = DataFrame(columns=[("bar", "baz"), "foo"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "how, values", + [ + ("inner", [0, 1, 2]), + ("outer", [0, 1, 2]), + ("left", [0, 1, 2]), + ("right", [0, 2, 1]), + ], +) +def test_join_multiindex_categorical_output_index_dtype(how, values): + # GH#50906 + df1 = DataFrame( + { + "a": Categorical([0, 1, 2]), + "b": Categorical([0, 1, 2]), + "c": [0, 1, 2], + } + ).set_index(["a", "b"]) + + df2 = DataFrame( + { + "a": Categorical([0, 2, 1]), + "b": Categorical([0, 2, 1]), + "d": [0, 2, 1], + } + ).set_index(["a", "b"]) + + expected = DataFrame( + { + "a": Categorical(values), + "b": Categorical(values), + "c": values, + "d": values, + } + ).set_index(["a", "b"]) + + result = df1.join(df2, how=how) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge.py new file mode 100644 index 
0000000000000000000000000000000000000000..ed49f3b758cc5cd826a804c8de611681a3e0343c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge.py @@ -0,0 +1,3020 @@ +from datetime import ( + date, + datetime, + timedelta, +) +import re + +import numpy as np +import pytest + +from pandas.core.dtypes.common import ( + is_object_dtype, + is_string_dtype, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, +) +import pandas._testing as tm +from pandas.core.reshape.concat import concat +from pandas.core.reshape.merge import ( + MergeError, + merge, +) + + +def get_test_data(ngroups=8, n=50): + unique_groups = list(range(ngroups)) + arr = np.asarray(np.tile(unique_groups, n // ngroups)) + + if len(arr) < n: + arr = np.asarray(list(arr) + unique_groups[: n - len(arr)]) + + np.random.default_rng(2).shuffle(arr) + return arr + + +def get_series(): + return [ + Series([1], dtype="int64"), + Series([1], dtype="Int64"), + Series([1.23]), + Series(["foo"]), + Series([True]), + Series([pd.Timestamp("2018-01-01")]), + Series([pd.Timestamp("2018-01-01", tz="US/Eastern")]), + ] + + +def get_series_na(): + return [ + Series([np.nan], dtype="Int64"), + Series([np.nan], dtype="float"), + Series([np.nan], dtype="object"), + Series([pd.NaT]), + ] + + +@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name) +def series_of_dtype(request): + """ + A parametrized fixture returning a variety of Series of different + dtypes + """ + return request.param + + +@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name) +def series_of_dtype2(request): + """ + A duplicate of the series_of_dtype fixture, so that it can be used + twice by a single function + """ + return request.param + + +@pytest.fixture(params=get_series_na(), ids=lambda x: x.dtype.name) +def series_of_dtype_all_na(request): + """ + A parametrized fixture returning a variety of Series with all NA + values + """ + return request.param + + +@pytest.fixture +def dfs_for_indicator(): + df1 = DataFrame({"col1": [0, 1], "col_conflict": [1, 2], "col_left": ["a", "b"]}) + df2 = DataFrame( + { + "col1": [1, 2, 3, 4, 5], + "col_conflict": [1, 2, 3, 4, 5], + "col_right": [2, 2, 2, 2, 2], + } + ) + return df1, df2 + + +class TestMerge: + @pytest.fixture + def df(self): + df = DataFrame( + { + "key1": get_test_data(), + "key2": get_test_data(), + "data1": np.random.default_rng(2).standard_normal(50), + "data2": np.random.default_rng(2).standard_normal(50), + } + ) + + # exclude a couple keys for fun + df = df[df["key2"] > 1] + return df + + @pytest.fixture + def df2(self): + return DataFrame( + { + "key1": get_test_data(n=10), + "key2": get_test_data(ngroups=4, n=10), + "value": np.random.default_rng(2).standard_normal(10), + } + ) + + @pytest.fixture + def left(self): + return DataFrame( + { + "key": ["a", "b", "c", "d", "e", "e", "a"], + "v1": np.random.default_rng(2).standard_normal(7), + } + ) + + @pytest.fixture + def right(self): + return DataFrame( + {"v2": np.random.default_rng(2).standard_normal(4)}, + index=["d", "b", "c", "a"], + ) + + def test_merge_inner_join_empty(self): + # GH 15328 + df_empty = DataFrame() + df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64") + result = merge(df_empty, df_a, left_index=True, right_index=True) + expected = DataFrame({"a": []}, dtype="int64") + 
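# the index-on-index inner merge of an empty frame keeps zero rows but must preserve the int64 dtype of column "a" +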
tm.assert_frame_equal(result, expected) + + def test_merge_common(self, df, df2): + joined = merge(df, df2) + exp = merge(df, df2, on=["key1", "key2"]) + tm.assert_frame_equal(joined, exp) + + def test_merge_non_string_columns(self): + # https://github.com/pandas-dev/pandas/issues/17962 + # Checks that method runs for non string column names + left = DataFrame( + {0: [1, 0, 1, 0], 1: [0, 1, 0, 0], 2: [0, 0, 2, 0], 3: [1, 0, 0, 3]} + ) + + right = left.astype(float) + expected = left + result = merge(left, right) + tm.assert_frame_equal(expected, result) + + def test_merge_index_as_on_arg(self, df, df2): + # GH14355 + + left = df.set_index("key1") + right = df2.set_index("key1") + result = merge(left, right, on="key1") + expected = merge(df, df2, on="key1").set_index("key1") + tm.assert_frame_equal(result, expected) + + def test_merge_index_singlekey_right_vs_left(self): + left = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "e", "a"], + "v1": np.random.default_rng(2).standard_normal(7), + } + ) + right = DataFrame( + {"v2": np.random.default_rng(2).standard_normal(4)}, + index=["d", "b", "c", "a"], + ) + + merged1 = merge( + left, right, left_on="key", right_index=True, how="left", sort=False + ) + merged2 = merge( + right, left, right_on="key", left_index=True, how="right", sort=False + ) + tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns]) + + merged1 = merge( + left, right, left_on="key", right_index=True, how="left", sort=True + ) + merged2 = merge( + right, left, right_on="key", left_index=True, how="right", sort=True + ) + tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns]) + + def test_merge_index_singlekey_inner(self): + left = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "e", "a"], + "v1": np.random.default_rng(2).standard_normal(7), + } + ) + right = DataFrame( + {"v2": np.random.default_rng(2).standard_normal(4)}, + index=["d", "b", "c", "a"], + ) + + # inner join + result = merge(left, right, left_on="key", right_index=True, how="inner") + expected = left.join(right, on="key").loc[result.index] + tm.assert_frame_equal(result, expected) + + result = merge(right, left, right_on="key", left_index=True, how="inner") + expected = left.join(right, on="key").loc[result.index] + tm.assert_frame_equal(result, expected.loc[:, result.columns]) + + def test_merge_misspecified(self, df, df2, left, right): + msg = "Must pass right_on or right_index=True" + with pytest.raises(pd.errors.MergeError, match=msg): + merge(left, right, left_index=True) + msg = "Must pass left_on or left_index=True" + with pytest.raises(pd.errors.MergeError, match=msg): + merge(left, right, right_index=True) + + msg = ( + 'Can only pass argument "on" OR "left_on" and "right_on", not ' + "a combination of both" + ) + with pytest.raises(pd.errors.MergeError, match=msg): + merge(left, left, left_on="key", on="key") + + msg = r"len\(right_on\) must equal len\(left_on\)" + with pytest.raises(ValueError, match=msg): + merge(df, df2, left_on=["key1"], right_on=["key1", "key2"]) + + def test_index_and_on_parameters_confusion(self, df, df2): + msg = "right_index parameter must be of type bool, not " + with pytest.raises(ValueError, match=msg): + merge( + df, + df2, + how="left", + left_index=False, + right_index=["key1", "key2"], + ) + msg = "left_index parameter must be of type bool, not " + with pytest.raises(ValueError, match=msg): + merge( + df, + df2, + how="left", + left_index=["key1", "key2"], + right_index=False, + ) + with pytest.raises(ValueError, match=msg): + merge( + df, + df2, 
+ how="left", + left_index=["key1", "key2"], + right_index=["key1", "key2"], + ) + + def test_merge_overlap(self, left): + merged = merge(left, left, on="key") + exp_len = (left["key"].value_counts() ** 2).sum() + assert len(merged) == exp_len + assert "v1_x" in merged + assert "v1_y" in merged + + def test_merge_different_column_key_names(self): + left = DataFrame({"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]}) + right = DataFrame({"rkey": ["foo", "bar", "qux", "foo"], "value": [5, 6, 7, 8]}) + + merged = left.merge( + right, left_on="lkey", right_on="rkey", how="outer", sort=True + ) + + exp = Series(["bar", "baz", "foo", "foo", "foo", "foo", np.nan], name="lkey") + tm.assert_series_equal(merged["lkey"], exp) + + exp = Series(["bar", np.nan, "foo", "foo", "foo", "foo", "qux"], name="rkey") + tm.assert_series_equal(merged["rkey"], exp) + + exp = Series([2, 3, 1, 1, 4, 4, np.nan], name="value_x") + tm.assert_series_equal(merged["value_x"], exp) + + exp = Series([6, np.nan, 5, 8, 5, 8, 7], name="value_y") + tm.assert_series_equal(merged["value_y"], exp) + + def test_merge_copy(self): + left = DataFrame({"a": 0, "b": 1}, index=range(10)) + right = DataFrame({"c": "foo", "d": "bar"}, index=range(10)) + + merged = merge(left, right, left_index=True, right_index=True, copy=True) + + merged["a"] = 6 + assert (left["a"] == 0).all() + + merged["d"] = "peekaboo" + assert (right["d"] == "bar").all() + + def test_merge_nocopy(self, using_array_manager, using_infer_string): + left = DataFrame({"a": 0, "b": 1}, index=range(10)) + right = DataFrame({"c": "foo", "d": "bar"}, index=range(10)) + + merged = merge(left, right, left_index=True, right_index=True, copy=False) + + assert np.shares_memory(merged["a"]._values, left["a"]._values) + if not using_infer_string: + assert np.shares_memory(merged["d"]._values, right["d"]._values) + + def test_intelligently_handle_join_key(self): + # #733, be a bit more 1337 about not returning unconsolidated DataFrame + + left = DataFrame( + {"key": [1, 1, 2, 2, 3], "value": list(range(5))}, columns=["value", "key"] + ) + right = DataFrame({"key": [1, 1, 2, 3, 4, 5], "rvalue": list(range(6))}) + + joined = merge(left, right, on="key", how="outer") + expected = DataFrame( + { + "key": [1, 1, 1, 1, 2, 2, 3, 4, 5], + "value": np.array([0, 0, 1, 1, 2, 3, 4, np.nan, np.nan]), + "rvalue": [0, 1, 0, 1, 2, 2, 3, 4, 5], + }, + columns=["value", "key", "rvalue"], + ) + tm.assert_frame_equal(joined, expected) + + def test_merge_join_key_dtype_cast(self): + # #8596 + + df1 = DataFrame({"key": [1], "v1": [10]}) + df2 = DataFrame({"key": [2], "v1": [20]}) + df = merge(df1, df2, how="outer") + assert df["key"].dtype == "int64" + + df1 = DataFrame({"key": [True], "v1": [1]}) + df2 = DataFrame({"key": [False], "v1": [0]}) + df = merge(df1, df2, how="outer") + + # GH13169 + # GH#40073 + assert df["key"].dtype == "bool" + + df1 = DataFrame({"val": [1]}) + df2 = DataFrame({"val": [2]}) + lkey = np.array([1]) + rkey = np.array([2]) + df = merge(df1, df2, left_on=lkey, right_on=rkey, how="outer") + assert df["key_0"].dtype == np.dtype(int) + + def test_handle_join_key_pass_array(self): + left = DataFrame( + {"key": [1, 1, 2, 2, 3], "value": np.arange(5)}, + columns=["value", "key"], + dtype="int64", + ) + right = DataFrame({"rvalue": np.arange(6)}, dtype="int64") + key = np.array([1, 1, 2, 3, 4, 5], dtype="int64") + + merged = merge(left, right, left_on="key", right_on=key, how="outer") + merged2 = merge(right, left, left_on=key, right_on="key", how="outer") + + 
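# both orientations of the outer merge must agree on the merged key column and leave no missing keys +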
tm.assert_series_equal(merged["key"], merged2["key"]) + assert merged["key"].notna().all() + assert merged2["key"].notna().all() + + left = DataFrame({"value": np.arange(5)}, columns=["value"]) + right = DataFrame({"rvalue": np.arange(6)}) + lkey = np.array([1, 1, 2, 2, 3]) + rkey = np.array([1, 1, 2, 3, 4, 5]) + + merged = merge(left, right, left_on=lkey, right_on=rkey, how="outer") + expected = Series([1, 1, 1, 1, 2, 2, 3, 4, 5], dtype=int, name="key_0") + tm.assert_series_equal(merged["key_0"], expected) + + left = DataFrame({"value": np.arange(3)}) + right = DataFrame({"rvalue": np.arange(6)}) + + key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64) + merged = merge(left, right, left_index=True, right_on=key, how="outer") + tm.assert_series_equal(merged["key_0"], Series(key, name="key_0")) + + def test_no_overlap_more_informative_error(self): + dt = datetime.now() + df1 = DataFrame({"x": ["a"]}, index=[dt]) + + df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt]) + + msg = ( + "No common columns to perform merge on. " + f"Merge options: left_on={None}, right_on={None}, " + f"left_index={False}, right_index={False}" + ) + + with pytest.raises(MergeError, match=msg): + merge(df1, df2) + + def test_merge_non_unique_indexes(self): + dt = datetime(2012, 5, 1) + dt2 = datetime(2012, 5, 2) + dt3 = datetime(2012, 5, 3) + dt4 = datetime(2012, 5, 4) + + df1 = DataFrame({"x": ["a"]}, index=[dt]) + df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt]) + _check_merge(df1, df2) + + # Not monotonic + df1 = DataFrame({"x": ["a", "b", "q"]}, index=[dt2, dt, dt4]) + df2 = DataFrame( + {"y": ["c", "d", "e", "f", "g", "h"]}, index=[dt3, dt3, dt2, dt2, dt, dt] + ) + _check_merge(df1, df2) + + df1 = DataFrame({"x": ["a", "b"]}, index=[dt, dt]) + df2 = DataFrame({"y": ["c", "d"]}, index=[dt, dt]) + _check_merge(df1, df2) + + def test_merge_non_unique_index_many_to_many(self): + dt = datetime(2012, 5, 1) + dt2 = datetime(2012, 5, 2) + dt3 = datetime(2012, 5, 3) + df1 = DataFrame({"x": ["a", "b", "c", "d"]}, index=[dt2, dt2, dt, dt]) + df2 = DataFrame( + {"y": ["e", "f", "g", " h", "i"]}, index=[dt2, dt2, dt3, dt, dt] + ) + _check_merge(df1, df2) + + def test_left_merge_empty_dataframe(self): + left = DataFrame({"key": [1], "value": [2]}) + right = DataFrame({"key": []}) + + result = merge(left, right, on="key", how="left") + tm.assert_frame_equal(result, left) + + result = merge(right, left, on="key", how="right") + tm.assert_frame_equal(result, left) + + @pytest.mark.parametrize("how", ["inner", "left", "right", "outer"]) + def test_merge_empty_dataframe(self, index, how): + # GH52777 + left = DataFrame([], index=index[:0]) + right = left.copy() + + result = left.join(right, how=how) + tm.assert_frame_equal(result, left) + + @pytest.mark.parametrize( + "kwarg", + [ + {"left_index": True, "right_index": True}, + {"left_index": True, "right_on": "x"}, + {"left_on": "a", "right_index": True}, + {"left_on": "a", "right_on": "x"}, + ], + ) + def test_merge_left_empty_right_empty(self, join_type, kwarg): + # GH 10824 + left = DataFrame(columns=["a", "b", "c"]) + right = DataFrame(columns=["x", "y", "z"]) + + exp_in = DataFrame(columns=["a", "b", "c", "x", "y", "z"], dtype=object) + + result = merge(left, right, how=join_type, **kwarg) + tm.assert_frame_equal(result, exp_in) + + def test_merge_left_empty_right_notempty(self): + # GH 10824 + left = DataFrame(columns=["a", "b", "c"]) + right = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["x", "y", "z"]) + + exp_out = DataFrame( + { + "a": np.array([np.nan] * 3, 
dtype=object), + "b": np.array([np.nan] * 3, dtype=object), + "c": np.array([np.nan] * 3, dtype=object), + "x": [1, 4, 7], + "y": [2, 5, 8], + "z": [3, 6, 9], + }, + columns=["a", "b", "c", "x", "y", "z"], + ) + exp_in = exp_out[0:0] # make empty DataFrame keeping dtype + + def check1(exp, kwarg): + result = merge(left, right, how="inner", **kwarg) + tm.assert_frame_equal(result, exp) + result = merge(left, right, how="left", **kwarg) + tm.assert_frame_equal(result, exp) + + def check2(exp, kwarg): + result = merge(left, right, how="right", **kwarg) + tm.assert_frame_equal(result, exp) + result = merge(left, right, how="outer", **kwarg) + tm.assert_frame_equal(result, exp) + + for kwarg in [ + {"left_index": True, "right_index": True}, + {"left_index": True, "right_on": "x"}, + ]: + check1(exp_in, kwarg) + check2(exp_out, kwarg) + + kwarg = {"left_on": "a", "right_index": True} + check1(exp_in, kwarg) + exp_out["a"] = [0, 1, 2] + check2(exp_out, kwarg) + + kwarg = {"left_on": "a", "right_on": "x"} + check1(exp_in, kwarg) + exp_out["a"] = np.array([np.nan] * 3, dtype=object) + check2(exp_out, kwarg) + + def test_merge_left_notempty_right_empty(self): + # GH 10824 + left = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]) + right = DataFrame(columns=["x", "y", "z"]) + + exp_out = DataFrame( + { + "a": [1, 4, 7], + "b": [2, 5, 8], + "c": [3, 6, 9], + "x": np.array([np.nan] * 3, dtype=object), + "y": np.array([np.nan] * 3, dtype=object), + "z": np.array([np.nan] * 3, dtype=object), + }, + columns=["a", "b", "c", "x", "y", "z"], + ) + exp_in = exp_out[0:0] # make empty DataFrame keeping dtype + # result will have object dtype + exp_in.index = exp_in.index.astype(object) + + def check1(exp, kwarg): + result = merge(left, right, how="inner", **kwarg) + tm.assert_frame_equal(result, exp) + result = merge(left, right, how="right", **kwarg) + tm.assert_frame_equal(result, exp) + + def check2(exp, kwarg): + result = merge(left, right, how="left", **kwarg) + tm.assert_frame_equal(result, exp) + result = merge(left, right, how="outer", **kwarg) + tm.assert_frame_equal(result, exp) + + # TODO: should the next loop be un-indented? 
doing so breaks this test + for kwarg in [ + {"left_index": True, "right_index": True}, + {"left_index": True, "right_on": "x"}, + {"left_on": "a", "right_index": True}, + {"left_on": "a", "right_on": "x"}, + ]: + check1(exp_in, kwarg) + check2(exp_out, kwarg) + + def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2): + # GH 25183 + df = DataFrame( + {"key": series_of_dtype, "value": series_of_dtype2}, + columns=["key", "value"], + ) + df_empty = df[:0] + expected = DataFrame( + { + "key": Series(dtype=df.dtypes["key"]), + "value_x": Series(dtype=df.dtypes["value"]), + "value_y": Series(dtype=df.dtypes["value"]), + }, + columns=["key", "value_x", "value_y"], + ) + actual = df_empty.merge(df, on="key") + tm.assert_frame_equal(actual, expected) + + def test_merge_all_na_column(self, series_of_dtype, series_of_dtype_all_na): + # GH 25183 + df_left = DataFrame( + {"key": series_of_dtype, "value": series_of_dtype_all_na}, + columns=["key", "value"], + ) + df_right = DataFrame( + {"key": series_of_dtype, "value": series_of_dtype_all_na}, + columns=["key", "value"], + ) + expected = DataFrame( + { + "key": series_of_dtype, + "value_x": series_of_dtype_all_na, + "value_y": series_of_dtype_all_na, + }, + columns=["key", "value_x", "value_y"], + ) + actual = df_left.merge(df_right, on="key") + tm.assert_frame_equal(actual, expected) + + def test_merge_nosort(self): + # GH#2098 + + d = { + "var1": np.random.default_rng(2).integers(0, 10, size=10), + "var2": np.random.default_rng(2).integers(0, 10, size=10), + "var3": [ + datetime(2012, 1, 12), + datetime(2011, 2, 4), + datetime(2010, 2, 3), + datetime(2012, 1, 12), + datetime(2011, 2, 4), + datetime(2012, 4, 3), + datetime(2012, 3, 4), + datetime(2008, 5, 1), + datetime(2010, 2, 3), + datetime(2012, 2, 3), + ], + } + df = DataFrame.from_dict(d) + var3 = df.var3.unique() + var3 = np.sort(var3) + new = DataFrame.from_dict( + {"var3": var3, "var8": np.random.default_rng(2).random(7)} + ) + + result = df.merge(new, on="var3", sort=False) + exp = merge(df, new, on="var3", sort=False) + tm.assert_frame_equal(result, exp) + + assert (df.var3.unique() == result.var3.unique()).all() + + @pytest.mark.parametrize( + ("sort", "values"), [(False, [1, 1, 0, 1, 1]), (True, [0, 1, 1, 1, 1])] + ) + @pytest.mark.parametrize("how", ["left", "right"]) + def test_merge_same_order_left_right(self, sort, values, how): + # GH#35382 + df = DataFrame({"a": [1, 0, 1]}) + + result = df.merge(df, on="a", how=how, sort=sort) + expected = DataFrame(values, columns=["a"]) + tm.assert_frame_equal(result, expected) + + def test_merge_nan_right(self): + df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]}) + df2 = DataFrame({"i1": [0], "i3": [0]}) + result = df1.join(df2, on="i1", rsuffix="_") + expected = ( + DataFrame( + { + "i1": {0: 0.0, 1: 1}, + "i2": {0: 0, 1: 1}, + "i1_": {0: 0, 1: np.nan}, + "i3": {0: 0.0, 1: np.nan}, + None: {0: 0, 1: 0}, + }, + columns=Index(["i1", "i2", "i1_", "i3", None], dtype=object), + ) + .set_index(None) + .reset_index()[["i1", "i2", "i1_", "i3"]] + ) + result.columns = result.columns.astype("object") + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_merge_nan_right2(self): + df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]}) + df2 = DataFrame({"i1": [0], "i3": [0.7]}) + result = df1.join(df2, rsuffix="_", on="i1") + expected = DataFrame( + { + "i1": {0: 0, 1: 1}, + "i1_": {0: 0.0, 1: np.nan}, + "i2": {0: 0.5, 1: 1.5}, + "i3": {0: 0.69999999999999996, 1: np.nan}, + } + )[["i1", "i2", "i1_", "i3"]] + 
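Aside, for `test_merge_same_order_left_right` above: the parametrized values encode a simple guarantee about `sort`, sketched here with the same three-row frame the test uses:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 0, 1]})

# sort=False preserves the left frame's key order; sort=True sorts the
# join keys before assembling the result.
print(df.merge(df, on="a", how="left", sort=False)["a"].tolist())  # [1, 1, 0, 1, 1]
print(df.merge(df, on="a", how="left", sort=True)["a"].tolist())   # [0, 1, 1, 1, 1]
```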
tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" + ) + def test_merge_type(self, df, df2): + class NotADataFrame(DataFrame): + @property + def _constructor(self): + return NotADataFrame + + nad = NotADataFrame(df) + result = nad.merge(df2, on="key1") + + assert isinstance(result, NotADataFrame) + + def test_join_append_timedeltas(self, using_array_manager): + # timedelta64 issues with join/merge + # GH 5695 + + d = DataFrame.from_dict( + {"d": [datetime(2013, 11, 5, 5, 56)], "t": [timedelta(0, 22500)]} + ) + df = DataFrame(columns=list("dt")) + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + warn = FutureWarning + if using_array_manager: + warn = None + with tm.assert_produces_warning(warn, match=msg): + df = concat([df, d], ignore_index=True) + result = concat([df, d], ignore_index=True) + expected = DataFrame( + { + "d": [datetime(2013, 11, 5, 5, 56), datetime(2013, 11, 5, 5, 56)], + "t": [timedelta(0, 22500), timedelta(0, 22500)], + } + ) + if using_array_manager: + # TODO(ArrayManager) decide on exact casting rules in concat + expected = expected.astype(object) + tm.assert_frame_equal(result, expected) + + def test_join_append_timedeltas2(self): + # timedelta64 issues with join/merge + # GH 5695 + td = np.timedelta64(300000000) + lhs = DataFrame(Series([td, td], index=["A", "B"])) + rhs = DataFrame(Series([td], index=["A"])) + + result = lhs.join(rhs, rsuffix="r", how="left") + expected = DataFrame( + { + "0": Series([td, td], index=list("AB")), + "0r": Series([td, pd.NaT], index=list("AB")), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"]) + def test_other_datetime_unit(self, unit): + # GH 13389 + df1 = DataFrame({"entity_id": [101, 102]}) + ser = Series([None, None], index=[101, 102], name="days") + + dtype = f"datetime64[{unit}]" + + if unit in ["D", "h", "m"]: + # not supported so we cast to the nearest supported unit, seconds + exp_dtype = "datetime64[s]" + else: + exp_dtype = dtype + df2 = ser.astype(exp_dtype).to_frame("days") + assert df2["days"].dtype == exp_dtype + + result = df1.merge(df2, left_on="entity_id", right_index=True) + + days = np.array(["nat", "nat"], dtype=exp_dtype) + days = pd.core.arrays.DatetimeArray._simple_new(days, dtype=days.dtype) + exp = DataFrame( + { + "entity_id": [101, 102], + "days": days, + }, + columns=["entity_id", "days"], + ) + assert exp["days"].dtype == exp_dtype + tm.assert_frame_equal(result, exp) + + @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"]) + def test_other_timedelta_unit(self, unit): + # GH 13389 + df1 = DataFrame({"entity_id": [101, 102]}) + ser = Series([None, None], index=[101, 102], name="days") + + dtype = f"m8[{unit}]" + if unit in ["D", "h", "m"]: + # We cannot astype, instead do nearest supported unit, i.e. 
"s" + msg = "Supported resolutions are 's', 'ms', 'us', 'ns'" + with pytest.raises(ValueError, match=msg): + ser.astype(dtype) + + df2 = ser.astype("m8[s]").to_frame("days") + else: + df2 = ser.astype(dtype).to_frame("days") + assert df2["days"].dtype == dtype + + result = df1.merge(df2, left_on="entity_id", right_index=True) + + exp = DataFrame( + {"entity_id": [101, 102], "days": np.array(["nat", "nat"], dtype=dtype)}, + columns=["entity_id", "days"], + ) + tm.assert_frame_equal(result, exp) + + def test_overlapping_columns_error_message(self): + df = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]}) + df2 = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]}) + + df.columns = ["key", "foo", "foo"] + df2.columns = ["key", "bar", "bar"] + expected = DataFrame( + { + "key": [1, 2, 3], + "v1": [4, 5, 6], + "v2": [7, 8, 9], + "v3": [4, 5, 6], + "v4": [7, 8, 9], + } + ) + expected.columns = ["key", "foo", "foo", "bar", "bar"] + tm.assert_frame_equal(merge(df, df2), expected) + + # #2649, #10639 + df2.columns = ["key1", "foo", "foo"] + msg = r"Data columns not unique: Index\(\['foo'\], dtype='object|string'\)" + with pytest.raises(MergeError, match=msg): + merge(df, df2) + + def test_merge_on_datetime64tz(self): + # GH11405 + left = DataFrame( + { + "key": pd.date_range("20151010", periods=2, tz="US/Eastern"), + "value": [1, 2], + } + ) + right = DataFrame( + { + "key": pd.date_range("20151011", periods=3, tz="US/Eastern"), + "value": [1, 2, 3], + } + ) + + expected = DataFrame( + { + "key": pd.date_range("20151010", periods=4, tz="US/Eastern"), + "value_x": [1, 2, np.nan, np.nan], + "value_y": [np.nan, 1, 2, 3], + } + ) + result = merge(left, right, on="key", how="outer") + tm.assert_frame_equal(result, expected) + + def test_merge_datetime64tz_values(self): + left = DataFrame( + { + "key": [1, 2], + "value": pd.date_range("20151010", periods=2, tz="US/Eastern"), + } + ) + right = DataFrame( + { + "key": [2, 3], + "value": pd.date_range("20151011", periods=2, tz="US/Eastern"), + } + ) + expected = DataFrame( + { + "key": [1, 2, 3], + "value_x": list(pd.date_range("20151010", periods=2, tz="US/Eastern")) + + [pd.NaT], + "value_y": [pd.NaT] + + list(pd.date_range("20151011", periods=2, tz="US/Eastern")), + } + ) + result = merge(left, right, on="key", how="outer") + tm.assert_frame_equal(result, expected) + assert result["value_x"].dtype == "datetime64[ns, US/Eastern]" + assert result["value_y"].dtype == "datetime64[ns, US/Eastern]" + + def test_merge_on_datetime64tz_empty(self): + # https://github.com/pandas-dev/pandas/issues/25014 + dtz = pd.DatetimeTZDtype(tz="UTC") + right = DataFrame( + { + "date": DatetimeIndex(["2018"], dtype=dtz), + "value": [4.0], + "date2": DatetimeIndex(["2019"], dtype=dtz), + }, + columns=["date", "value", "date2"], + ) + left = right[:0] + result = left.merge(right, on="date") + expected = DataFrame( + { + "date": Series(dtype=dtz), + "value_x": Series(dtype=float), + "date2_x": Series(dtype=dtz), + "value_y": Series(dtype=float), + "date2_y": Series(dtype=dtz), + }, + columns=["date", "value_x", "date2_x", "value_y", "date2_y"], + ) + tm.assert_frame_equal(result, expected) + + def test_merge_datetime64tz_with_dst_transition(self): + # GH 18885 + df1 = DataFrame( + pd.date_range("2017-10-29 01:00", periods=4, freq="h", tz="Europe/Madrid"), + columns=["date"], + ) + df1["value"] = 1 + df2 = DataFrame( + { + "date": pd.to_datetime( + [ + "2017-10-29 03:00:00", + "2017-10-29 04:00:00", + "2017-10-29 05:00:00", + ] + ), + "value": 2, + } + ) + 
df2["date"] = df2["date"].dt.tz_localize("UTC").dt.tz_convert("Europe/Madrid") + result = merge(df1, df2, how="outer", on="date") + expected = DataFrame( + { + "date": pd.date_range( + "2017-10-29 01:00", periods=7, freq="h", tz="Europe/Madrid" + ), + "value_x": [1] * 4 + [np.nan] * 3, + "value_y": [np.nan] * 4 + [2] * 3, + } + ) + tm.assert_frame_equal(result, expected) + + def test_merge_non_unique_period_index(self): + # GH #16871 + index = pd.period_range("2016-01-01", periods=16, freq="M") + df = DataFrame(list(range(len(index))), index=index, columns=["pnum"]) + df2 = concat([df, df]) + result = df.merge(df2, left_index=True, right_index=True, how="inner") + expected = DataFrame( + np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2), + columns=["pnum_x", "pnum_y"], + index=df2.sort_index().index, + ) + tm.assert_frame_equal(result, expected) + + def test_merge_on_periods(self): + left = DataFrame( + {"key": pd.period_range("20151010", periods=2, freq="D"), "value": [1, 2]} + ) + right = DataFrame( + { + "key": pd.period_range("20151011", periods=3, freq="D"), + "value": [1, 2, 3], + } + ) + + expected = DataFrame( + { + "key": pd.period_range("20151010", periods=4, freq="D"), + "value_x": [1, 2, np.nan, np.nan], + "value_y": [np.nan, 1, 2, 3], + } + ) + result = merge(left, right, on="key", how="outer") + tm.assert_frame_equal(result, expected) + + def test_merge_period_values(self): + left = DataFrame( + {"key": [1, 2], "value": pd.period_range("20151010", periods=2, freq="D")} + ) + right = DataFrame( + {"key": [2, 3], "value": pd.period_range("20151011", periods=2, freq="D")} + ) + + exp_x = pd.period_range("20151010", periods=2, freq="D") + exp_y = pd.period_range("20151011", periods=2, freq="D") + expected = DataFrame( + { + "key": [1, 2, 3], + "value_x": list(exp_x) + [pd.NaT], + "value_y": [pd.NaT] + list(exp_y), + } + ) + result = merge(left, right, on="key", how="outer") + tm.assert_frame_equal(result, expected) + assert result["value_x"].dtype == "Period[D]" + assert result["value_y"].dtype == "Period[D]" + + def test_indicator(self, dfs_for_indicator): + # PR #10054. xref #7412 and closes #8790. 
+ df1, df2 = dfs_for_indicator + df1_copy = df1.copy() + + df2_copy = df2.copy() + + df_result = DataFrame( + { + "col1": [0, 1, 2, 3, 4, 5], + "col_conflict_x": [1, 2, np.nan, np.nan, np.nan, np.nan], + "col_left": ["a", "b", np.nan, np.nan, np.nan, np.nan], + "col_conflict_y": [np.nan, 1, 2, 3, 4, 5], + "col_right": [np.nan, 2, 2, 2, 2, 2], + } + ) + df_result["_merge"] = Categorical( + [ + "left_only", + "both", + "right_only", + "right_only", + "right_only", + "right_only", + ], + categories=["left_only", "right_only", "both"], + ) + + df_result = df_result[ + [ + "col1", + "col_conflict_x", + "col_left", + "col_conflict_y", + "col_right", + "_merge", + ] + ] + + test = merge(df1, df2, on="col1", how="outer", indicator=True) + tm.assert_frame_equal(test, df_result) + test = df1.merge(df2, on="col1", how="outer", indicator=True) + tm.assert_frame_equal(test, df_result) + + # No side effects + tm.assert_frame_equal(df1, df1_copy) + tm.assert_frame_equal(df2, df2_copy) + + # Check with custom name + df_result_custom_name = df_result + df_result_custom_name = df_result_custom_name.rename( + columns={"_merge": "custom_name"} + ) + + test_custom_name = merge( + df1, df2, on="col1", how="outer", indicator="custom_name" + ) + tm.assert_frame_equal(test_custom_name, df_result_custom_name) + test_custom_name = df1.merge( + df2, on="col1", how="outer", indicator="custom_name" + ) + tm.assert_frame_equal(test_custom_name, df_result_custom_name) + + def test_merge_indicator_arg_validation(self, dfs_for_indicator): + # Check only accepts strings and booleans + df1, df2 = dfs_for_indicator + + msg = "indicator option can only accept boolean or string arguments" + with pytest.raises(ValueError, match=msg): + merge(df1, df2, on="col1", how="outer", indicator=5) + with pytest.raises(ValueError, match=msg): + df1.merge(df2, on="col1", how="outer", indicator=5) + + def test_merge_indicator_result_integrity(self, dfs_for_indicator): + # Check result integrity + df1, df2 = dfs_for_indicator + + test2 = merge(df1, df2, on="col1", how="left", indicator=True) + assert (test2._merge != "right_only").all() + test2 = df1.merge(df2, on="col1", how="left", indicator=True) + assert (test2._merge != "right_only").all() + + test3 = merge(df1, df2, on="col1", how="right", indicator=True) + assert (test3._merge != "left_only").all() + test3 = df1.merge(df2, on="col1", how="right", indicator=True) + assert (test3._merge != "left_only").all() + + test4 = merge(df1, df2, on="col1", how="inner", indicator=True) + assert (test4._merge == "both").all() + test4 = df1.merge(df2, on="col1", how="inner", indicator=True) + assert (test4._merge == "both").all() + + def test_merge_indicator_invalid(self, dfs_for_indicator): + # Check if working name in df + df1, _ = dfs_for_indicator + + for i in ["_right_indicator", "_left_indicator", "_merge"]: + df_badcolumn = DataFrame({"col1": [1, 2], i: [2, 2]}) + + msg = ( + "Cannot use `indicator=True` option when data contains a " + f"column named {i}|" + "Cannot use name of an existing column for indicator column" + ) + with pytest.raises(ValueError, match=msg): + merge(df1, df_badcolumn, on="col1", how="outer", indicator=True) + with pytest.raises(ValueError, match=msg): + df1.merge(df_badcolumn, on="col1", how="outer", indicator=True) + + # Check for name conflict with custom name + df_badcolumn = DataFrame({"col1": [1, 2], "custom_column_name": [2, 2]}) + + msg = "Cannot use name of an existing column for indicator column" + with pytest.raises(ValueError, match=msg): + merge( + df1, + 
df_badcolumn, + on="col1", + how="outer", + indicator="custom_column_name", + ) + with pytest.raises(ValueError, match=msg): + df1.merge( + df_badcolumn, on="col1", how="outer", indicator="custom_column_name" + ) + + def test_merge_indicator_multiple_columns(self): + # Merge on multiple columns + df3 = DataFrame({"col1": [0, 1], "col2": ["a", "b"]}) + + df4 = DataFrame({"col1": [1, 1, 3], "col2": ["b", "x", "y"]}) + + hand_coded_result = DataFrame( + {"col1": [0, 1, 1, 3], "col2": ["a", "b", "x", "y"]} + ) + hand_coded_result["_merge"] = Categorical( + ["left_only", "both", "right_only", "right_only"], + categories=["left_only", "right_only", "both"], + ) + + test5 = merge(df3, df4, on=["col1", "col2"], how="outer", indicator=True) + tm.assert_frame_equal(test5, hand_coded_result) + test5 = df3.merge(df4, on=["col1", "col2"], how="outer", indicator=True) + tm.assert_frame_equal(test5, hand_coded_result) + + def test_validation(self): + left = DataFrame( + {"a": ["a", "b", "c", "d"], "b": ["cat", "dog", "weasel", "horse"]}, + index=range(4), + ) + + right = DataFrame( + { + "a": ["a", "b", "c", "d", "e"], + "c": ["meow", "bark", "um... weasel noise?", "nay", "chirp"], + }, + index=range(5), + ) + + # Make sure no side effects. + left_copy = left.copy() + right_copy = right.copy() + + result = merge(left, right, left_index=True, right_index=True, validate="1:1") + tm.assert_frame_equal(left, left_copy) + tm.assert_frame_equal(right, right_copy) + + # make sure merge still correct + expected = DataFrame( + { + "a_x": ["a", "b", "c", "d"], + "b": ["cat", "dog", "weasel", "horse"], + "a_y": ["a", "b", "c", "d"], + "c": ["meow", "bark", "um... weasel noise?", "nay"], + }, + index=range(4), + columns=["a_x", "b", "a_y", "c"], + ) + + result = merge( + left, right, left_index=True, right_index=True, validate="one_to_one" + ) + tm.assert_frame_equal(result, expected) + + expected_2 = DataFrame( + { + "a": ["a", "b", "c", "d"], + "b": ["cat", "dog", "weasel", "horse"], + "c": ["meow", "bark", "um... weasel noise?", "nay"], + }, + index=range(4), + ) + + result = merge(left, right, on="a", validate="1:1") + tm.assert_frame_equal(left, left_copy) + tm.assert_frame_equal(right, right_copy) + tm.assert_frame_equal(result, expected_2) + + result = merge(left, right, on="a", validate="one_to_one") + tm.assert_frame_equal(result, expected_2) + + # One index, one column + expected_3 = DataFrame( + { + "b": ["cat", "dog", "weasel", "horse"], + "a": ["a", "b", "c", "d"], + "c": ["meow", "bark", "um... 
weasel noise?", "nay"], + }, + columns=["b", "a", "c"], + index=range(4), + ) + + left_index_reset = left.set_index("a") + result = merge( + left_index_reset, + right, + left_index=True, + right_on="a", + validate="one_to_one", + ) + tm.assert_frame_equal(result, expected_3) + + # Dups on right + right_w_dups = concat([right, DataFrame({"a": ["e"], "c": ["moo"]}, index=[4])]) + merge( + left, + right_w_dups, + left_index=True, + right_index=True, + validate="one_to_many", + ) + + msg = "Merge keys are not unique in right dataset; not a one-to-one merge" + with pytest.raises(MergeError, match=msg): + merge( + left, + right_w_dups, + left_index=True, + right_index=True, + validate="one_to_one", + ) + + with pytest.raises(MergeError, match=msg): + merge(left, right_w_dups, on="a", validate="one_to_one") + + # Dups on left + left_w_dups = concat( + [left, DataFrame({"a": ["a"], "c": ["cow"]}, index=[3])], sort=True + ) + merge( + left_w_dups, + right, + left_index=True, + right_index=True, + validate="many_to_one", + ) + + msg = "Merge keys are not unique in left dataset; not a one-to-one merge" + with pytest.raises(MergeError, match=msg): + merge( + left_w_dups, + right, + left_index=True, + right_index=True, + validate="one_to_one", + ) + + with pytest.raises(MergeError, match=msg): + merge(left_w_dups, right, on="a", validate="one_to_one") + + # Dups on both + merge(left_w_dups, right_w_dups, on="a", validate="many_to_many") + + msg = "Merge keys are not unique in right dataset; not a many-to-one merge" + with pytest.raises(MergeError, match=msg): + merge( + left_w_dups, + right_w_dups, + left_index=True, + right_index=True, + validate="many_to_one", + ) + + msg = "Merge keys are not unique in left dataset; not a one-to-many merge" + with pytest.raises(MergeError, match=msg): + merge(left_w_dups, right_w_dups, on="a", validate="one_to_many") + + # Check invalid arguments + msg = ( + '"jibberish" is not a valid argument. ' + "Valid arguments are:\n" + '- "1:1"\n' + '- "1:m"\n' + '- "m:1"\n' + '- "m:m"\n' + '- "one_to_one"\n' + '- "one_to_many"\n' + '- "many_to_one"\n' + '- "many_to_many"' + ) + with pytest.raises(ValueError, match=msg): + merge(left, right, on="a", validate="jibberish") + + # Two column merge, dups in both, but jointly no dups. + left = DataFrame( + { + "a": ["a", "a", "b", "b"], + "b": [0, 1, 0, 1], + "c": ["cat", "dog", "weasel", "horse"], + }, + index=range(4), + ) + + right = DataFrame( + { + "a": ["a", "a", "b"], + "b": [0, 1, 0], + "d": ["meow", "bark", "um... weasel noise?"], + }, + index=range(3), + ) + + expected_multi = DataFrame( + { + "a": ["a", "a", "b"], + "b": [0, 1, 0], + "c": ["cat", "dog", "weasel"], + "d": ["meow", "bark", "um... 
weasel noise?"], + }, + index=range(3), + ) + + msg = ( + "Merge keys are not unique in either left or right dataset; " + "not a one-to-one merge" + ) + with pytest.raises(MergeError, match=msg): + merge(left, right, on="a", validate="1:1") + + result = merge(left, right, on=["a", "b"], validate="1:1") + tm.assert_frame_equal(result, expected_multi) + + def test_merge_two_empty_df_no_division_error(self): + # GH17776, PR #17846 + a = DataFrame({"a": [], "b": [], "c": []}) + with np.errstate(divide="raise"): + merge(a, a, on=("a", "b")) + + @pytest.mark.parametrize("how", ["right", "outer"]) + @pytest.mark.parametrize( + "index,expected_index", + [ + ( + CategoricalIndex([1, 2, 4]), + CategoricalIndex([1, 2, 4, None, None, None]), + ), + ( + DatetimeIndex( + ["2001-01-01", "2002-02-02", "2003-03-03"], dtype="M8[ns]" + ), + DatetimeIndex( + ["2001-01-01", "2002-02-02", "2003-03-03", pd.NaT, pd.NaT, pd.NaT], + dtype="M8[ns]", + ), + ), + *[ + ( + Index([1, 2, 3], dtype=dtyp), + Index([1, 2, 3, None, None, None], dtype=np.float64), + ) + for dtyp in tm.ALL_REAL_NUMPY_DTYPES + ], + ( + IntervalIndex.from_tuples([(1, 2), (2, 3), (3, 4)]), + IntervalIndex.from_tuples( + [(1, 2), (2, 3), (3, 4), np.nan, np.nan, np.nan] + ), + ), + ( + PeriodIndex(["2001-01-01", "2001-01-02", "2001-01-03"], freq="D"), + PeriodIndex( + ["2001-01-01", "2001-01-02", "2001-01-03", pd.NaT, pd.NaT, pd.NaT], + freq="D", + ), + ), + ( + TimedeltaIndex(["1d", "2d", "3d"]), + TimedeltaIndex(["1d", "2d", "3d", pd.NaT, pd.NaT, pd.NaT]), + ), + ], + ) + def test_merge_on_index_with_more_values(self, how, index, expected_index): + # GH 24212 + # pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that + # -1 is interpreted as a missing value instead of the last element + df1 = DataFrame({"a": [0, 1, 2], "key": [0, 1, 2]}, index=index) + df2 = DataFrame({"b": [0, 1, 2, 3, 4, 5]}) + result = df1.merge(df2, left_on="key", right_index=True, how=how) + expected = DataFrame( + [ + [0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [np.nan, 3, 3], + [np.nan, 4, 4], + [np.nan, 5, 5], + ], + columns=["a", "key", "b"], + ) + expected.set_index(expected_index, inplace=True) + tm.assert_frame_equal(result, expected) + + def test_merge_right_index_right(self): + # Note: the expected output here is probably incorrect. + # See https://github.com/pandas-dev/pandas/issues/17257 for more. + # We include this as a regression test for GH-24897. 
+ left = DataFrame({"a": [1, 2, 3], "key": [0, 1, 1]}) + right = DataFrame({"b": [1, 2, 3]}) + + expected = DataFrame( + {"a": [1, 2, 3, None], "key": [0, 1, 1, 2], "b": [1, 2, 2, 3]}, + columns=["a", "key", "b"], + index=[0, 1, 2, np.nan], + ) + result = left.merge(right, left_on="key", right_index=True, how="right") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("how", ["left", "right"]) + def test_merge_preserves_row_order(self, how): + # GH 27453 + left_df = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]}) + right_df = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]}) + result = left_df.merge(right_df, on=["animal", "max_speed"], how=how) + if how == "right": + expected = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]}) + else: + expected = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]}) + tm.assert_frame_equal(result, expected) + + def test_merge_take_missing_values_from_index_of_other_dtype(self): + # GH 24212 + left = DataFrame( + { + "a": [1, 2, 3], + "key": Categorical(["a", "a", "b"], categories=list("abc")), + } + ) + right = DataFrame({"b": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"])) + result = left.merge(right, left_on="key", right_index=True, how="right") + expected = DataFrame( + { + "a": [1, 2, 3, None], + "key": Categorical(["a", "a", "b", "c"]), + "b": [1, 1, 2, 3], + }, + index=[0, 1, 2, np.nan], + ) + expected = expected.reindex(columns=["a", "key", "b"]) + tm.assert_frame_equal(result, expected) + + def test_merge_readonly(self): + # https://github.com/pandas-dev/pandas/issues/27943 + data1 = DataFrame( + np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"] + ) + data2 = DataFrame( + np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"] + ) + + # make each underlying block array / column array read-only + for arr in data1._mgr.arrays: + arr.flags.writeable = False + + data1.merge(data2) # no error + + +def _check_merge(x, y): + for how in ["inner", "left", "outer"]: + for sort in [True, False]: + result = x.join(y, how=how, sort=sort) + + expected = merge(x.reset_index(), y.reset_index(), how=how, sort=sort) + expected = expected.set_index("index") + + # TODO check_names on merge? 
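On the `check_names` TODO in `_check_merge` above: the reset_index/merge/set_index round trip names the index `"index"`, while `join` leaves the original (unnamed) index name alone, so the values match but the names do not. A sketch of why the assertion that follows needs `check_names=False`:

```python
import pandas as pd

x = pd.DataFrame({"a": [1, 2]}, index=[10, 20])
y = pd.DataFrame({"b": [3, 4]}, index=[10, 20])

joined = x.join(y)
round_trip = pd.merge(x.reset_index(), y.reset_index(), on="index").set_index("index")

# Same values, different index names: None vs "index".
print(joined.index.name, round_trip.index.name)
pd.testing.assert_frame_equal(joined, round_trip, check_names=False)  # passes
```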
+ tm.assert_frame_equal(result, expected, check_names=False) + + +class TestMergeDtypes: + @pytest.mark.parametrize( + "right_vals", [["foo", "bar"], Series(["foo", "bar"]).astype("category")] + ) + def test_different(self, right_vals): + left = DataFrame( + { + "A": ["foo", "bar"], + "B": Series(["foo", "bar"]).astype("category"), + "C": [1, 2], + "D": [1.0, 2.0], + "E": Series([1, 2], dtype="uint64"), + "F": Series([1, 2], dtype="int32"), + } + ) + right = DataFrame({"A": right_vals}) + + # GH 9780 + # We allow merging on object and categorical cols and cast + # categorical cols to object + result = merge(left, right, on="A") + assert is_object_dtype(result.A.dtype) or is_string_dtype(result.A.dtype) + + @pytest.mark.parametrize( + "d1", [np.int64, np.int32, np.intc, np.int16, np.int8, np.uint8] + ) + @pytest.mark.parametrize("d2", [np.int64, np.float64, np.float32, np.float16]) + def test_join_multi_dtypes(self, d1, d2): + dtype1 = np.dtype(d1) + dtype2 = np.dtype(d2) + + left = DataFrame( + { + "k1": np.array([0, 1, 2] * 8, dtype=dtype1), + "k2": ["foo", "bar"] * 12, + "v": np.array(np.arange(24), dtype=np.int64), + } + ) + + index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")]) + right = DataFrame({"v2": np.array([5, 7], dtype=dtype2)}, index=index) + + result = left.join(right, on=["k1", "k2"]) + + expected = left.copy() + + if dtype2.kind == "i": + dtype2 = np.dtype("float64") + expected["v2"] = np.array(np.nan, dtype=dtype2) + expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5 + expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7 + + tm.assert_frame_equal(result, expected) + + result = left.join(right, on=["k1", "k2"], sort=True) + expected.sort_values(["k1", "k2"], kind="mergesort", inplace=True) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "int_vals, float_vals, exp_vals", + [ + ([1, 2, 3], [1.0, 2.0, 3.0], {"X": [1, 2, 3], "Y": [1.0, 2.0, 3.0]}), + ([1, 2, 3], [1.0, 3.0], {"X": [1, 3], "Y": [1.0, 3.0]}), + ([1, 2], [1.0, 2.0, 3.0], {"X": [1, 2], "Y": [1.0, 2.0]}), + ], + ) + def test_merge_on_ints_floats(self, int_vals, float_vals, exp_vals): + # GH 16572 + # Check that float column is not cast to object if + # merging on float and int columns + A = DataFrame({"X": int_vals}) + B = DataFrame({"Y": float_vals}) + expected = DataFrame(exp_vals) + + result = A.merge(B, left_on="X", right_on="Y") + tm.assert_frame_equal(result, expected) + + result = B.merge(A, left_on="Y", right_on="X") + tm.assert_frame_equal(result, expected[["Y", "X"]]) + + def test_merge_key_dtype_cast(self): + # GH 17044 + df1 = DataFrame({"key": [1.0, 2.0], "v1": [10, 20]}, columns=["key", "v1"]) + df2 = DataFrame({"key": [2], "v2": [200]}, columns=["key", "v2"]) + result = df1.merge(df2, on="key", how="left") + expected = DataFrame( + {"key": [1.0, 2.0], "v1": [10, 20], "v2": [np.nan, 200.0]}, + columns=["key", "v1", "v2"], + ) + tm.assert_frame_equal(result, expected) + + def test_merge_on_ints_floats_warning(self): + # GH 16572 + # merge will produce a warning when merging on int and + # float columns where the float values are not exactly + # equal to their int representation + A = DataFrame({"X": [1, 2, 3]}) + B = DataFrame({"Y": [1.1, 2.5, 3.0]}) + expected = DataFrame({"X": [3], "Y": [3.0]}) + + with tm.assert_produces_warning(UserWarning): + result = A.merge(B, left_on="X", right_on="Y") + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(UserWarning): + result = B.merge(A, left_on="Y", right_on="X") + 
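The int/float warning path checked in this test can be reproduced standalone; a sketch using only the public `merge` API and the stdlib `warnings` machinery:

```python
import warnings

import pandas as pd

A = pd.DataFrame({"X": [1, 2, 3]})
B = pd.DataFrame({"Y": [1.1, 2.5, 3.0]})

# Merging an int key against floats that are not exact integers emits a
# UserWarning and keeps only the exactly-representable match (3 == 3.0).
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    out = A.merge(B, left_on="X", right_on="Y")
print(out.to_dict("list"))  # {'X': [3], 'Y': [3.0]}
print(caught[0].category)   # <class 'UserWarning'>
```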
tm.assert_frame_equal(result, expected[["Y", "X"]]) + + # test no warning if float has NaNs + B = DataFrame({"Y": [np.nan, np.nan, 3.0]}) + + with tm.assert_produces_warning(None): + result = B.merge(A, left_on="Y", right_on="X") + tm.assert_frame_equal(result, expected[["Y", "X"]]) + + def test_merge_incompat_infer_boolean_object(self): + # GH21119: bool + object bool merge OK + df1 = DataFrame({"key": Series([True, False], dtype=object)}) + df2 = DataFrame({"key": [True, False]}) + + expected = DataFrame({"key": [True, False]}, dtype=object) + result = merge(df1, df2, on="key") + tm.assert_frame_equal(result, expected) + result = merge(df2, df1, on="key") + tm.assert_frame_equal(result, expected) + + def test_merge_incompat_infer_boolean_object_with_missing(self): + # GH21119: bool + object bool merge OK + # with missing value + df1 = DataFrame({"key": Series([True, False, np.nan], dtype=object)}) + df2 = DataFrame({"key": [True, False]}) + + expected = DataFrame({"key": [True, False]}, dtype=object) + result = merge(df1, df2, on="key") + tm.assert_frame_equal(result, expected) + result = merge(df2, df1, on="key") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "df1_vals, df2_vals", + [ + # merge on category coerces to object + ([0, 1, 2], Series(["a", "b", "a"]).astype("category")), + ([0.0, 1.0, 2.0], Series(["a", "b", "a"]).astype("category")), + # no not infer + ([0, 1], Series([False, True], dtype=object)), + ([0, 1], Series([False, True], dtype=bool)), + ], + ) + def test_merge_incompat_dtypes_are_ok(self, df1_vals, df2_vals): + # these are explicitly allowed incompat merges, that pass thru + # the result type is dependent on if the values on the rhs are + # inferred, otherwise these will be coerced to object + + df1 = DataFrame({"A": df1_vals}) + df2 = DataFrame({"A": df2_vals}) + + result = merge(df1, df2, on=["A"]) + assert is_object_dtype(result.A.dtype) + result = merge(df2, df1, on=["A"]) + assert is_object_dtype(result.A.dtype) or is_string_dtype(result.A.dtype) + + @pytest.mark.parametrize( + "df1_vals, df2_vals", + [ + # do not infer to numeric + (Series([1, 2], dtype="uint64"), ["a", "b", "c"]), + (Series([1, 2], dtype="int32"), ["a", "b", "c"]), + ([0, 1, 2], ["0", "1", "2"]), + ([0.0, 1.0, 2.0], ["0", "1", "2"]), + ([0, 1, 2], ["0", "1", "2"]), + ( + pd.date_range("1/1/2011", periods=2, freq="D"), + ["2011-01-01", "2011-01-02"], + ), + (pd.date_range("1/1/2011", periods=2, freq="D"), [0, 1]), + (pd.date_range("1/1/2011", periods=2, freq="D"), [0.0, 1.0]), + ( + pd.date_range("20130101", periods=3), + pd.date_range("20130101", periods=3, tz="US/Eastern"), + ), + ], + ) + def test_merge_incompat_dtypes_error(self, df1_vals, df2_vals): + # GH 9780, GH 15800 + # Raise a ValueError when a user tries to merge on + # dtypes that are incompatible (e.g., obj and int/float) + + df1 = DataFrame({"A": df1_vals}) + df2 = DataFrame({"A": df2_vals}) + + msg = ( + f"You are trying to merge on {df1['A'].dtype} and {df2['A'].dtype} " + "columns for key 'A'. If you wish to proceed you should use pd.concat" + ) + msg = re.escape(msg) + with pytest.raises(ValueError, match=msg): + merge(df1, df2, on=["A"]) + + # Check that error still raised when swapping order of dataframes + msg = ( + f"You are trying to merge on {df2['A'].dtype} and {df1['A'].dtype} " + "columns for key 'A'. 
If you wish to proceed you should use pd.concat" + ) + msg = re.escape(msg) + with pytest.raises(ValueError, match=msg): + merge(df2, df1, on=["A"]) + + # Check that error still raised when merging on multiple columns + # The error message should mention the first incompatible column + if len(df1_vals) == len(df2_vals): + # Column A in df1 and df2 is of compatible (the same) dtype + # Columns B and C in df1 and df2 are of incompatible dtypes + df3 = DataFrame({"A": df2_vals, "B": df1_vals, "C": df1_vals}) + df4 = DataFrame({"A": df2_vals, "B": df2_vals, "C": df2_vals}) + + # Check that error raised correctly when merging all columns A, B, and C + # The error message should mention key 'B' + msg = ( + f"You are trying to merge on {df3['B'].dtype} and {df4['B'].dtype} " + "columns for key 'B'. If you wish to proceed you should use pd.concat" + ) + msg = re.escape(msg) + with pytest.raises(ValueError, match=msg): + merge(df3, df4) + + # Check that error raised correctly when merging columns A and C + # The error message should mention key 'C' + msg = ( + f"You are trying to merge on {df3['C'].dtype} and {df4['C'].dtype} " + "columns for key 'C'. If you wish to proceed you should use pd.concat" + ) + msg = re.escape(msg) + with pytest.raises(ValueError, match=msg): + merge(df3, df4, on=["A", "C"]) + + @pytest.mark.parametrize( + "expected_data, how", + [ + ([1, 2], "outer"), + ([], "inner"), + ([2], "right"), + ([1], "left"), + ], + ) + def test_merge_EA_dtype(self, any_numeric_ea_dtype, how, expected_data): + # GH#40073 + d1 = DataFrame([(1,)], columns=["id"], dtype=any_numeric_ea_dtype) + d2 = DataFrame([(2,)], columns=["id"], dtype=any_numeric_ea_dtype) + result = merge(d1, d2, how=how) + exp_index = RangeIndex(len(expected_data)) + expected = DataFrame( + expected_data, index=exp_index, columns=["id"], dtype=any_numeric_ea_dtype + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "expected_data, how", + [ + (["a", "b"], "outer"), + ([], "inner"), + (["b"], "right"), + (["a"], "left"), + ], + ) + def test_merge_string_dtype(self, how, expected_data, any_string_dtype): + # GH#40073 + d1 = DataFrame([("a",)], columns=["id"], dtype=any_string_dtype) + d2 = DataFrame([("b",)], columns=["id"], dtype=any_string_dtype) + result = merge(d1, d2, how=how) + exp_idx = RangeIndex(len(expected_data)) + expected = DataFrame( + expected_data, index=exp_idx, columns=["id"], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "how, expected_data", + [ + ("inner", [[True, 1, 4], [False, 5, 3]]), + ("outer", [[False, 5, 3], [True, 1, 4]]), + ("left", [[True, 1, 4], [False, 5, 3]]), + ("right", [[False, 5, 3], [True, 1, 4]]), + ], + ) + def test_merge_bool_dtype(self, how, expected_data): + # GH#40073 + df1 = DataFrame({"A": [True, False], "B": [1, 5]}) + df2 = DataFrame({"A": [False, True], "C": [3, 4]}) + result = merge(df1, df2, how=how) + expected = DataFrame(expected_data, columns=["A", "B", "C"]) + tm.assert_frame_equal(result, expected) + + def test_merge_ea_with_string(self, join_type, string_dtype): + # GH 43734 Avoid the use of `assign` with multi-index + df1 = DataFrame( + data={ + ("lvl0", "lvl1-a"): ["1", "2", "3", "4", None], + ("lvl0", "lvl1-b"): ["4", "5", "6", "7", "8"], + }, + dtype=pd.StringDtype(), + ) + df1_copy = df1.copy() + df2 = DataFrame( + data={ + ("lvl0", "lvl1-a"): ["1", "2", "3", pd.NA, "5"], + ("lvl0", "lvl1-c"): ["7", "8", "9", pd.NA, "11"], + }, + dtype=string_dtype, + ) + df2_copy = df2.copy() + merged 
= merge(left=df1, right=df2, on=[("lvl0", "lvl1-a")], how=join_type) + + # No change in df1 and df2 + tm.assert_frame_equal(df1, df1_copy) + tm.assert_frame_equal(df2, df2_copy) + + # Check the expected types for the merged data frame + expected = Series( + [np.dtype("O"), pd.StringDtype(), np.dtype("O")], + index=MultiIndex.from_tuples( + [("lvl0", "lvl1-a"), ("lvl0", "lvl1-b"), ("lvl0", "lvl1-c")] + ), + ) + tm.assert_series_equal(merged.dtypes, expected) + + @pytest.mark.parametrize( + "left_empty, how, exp", + [ + (False, "left", "left"), + (False, "right", "empty"), + (False, "inner", "empty"), + (False, "outer", "left"), + (False, "cross", "empty_cross"), + (True, "left", "empty"), + (True, "right", "right"), + (True, "inner", "empty"), + (True, "outer", "right"), + (True, "cross", "empty_cross"), + ], + ) + def test_merge_empty(self, left_empty, how, exp): + left = DataFrame({"A": [2, 1], "B": [3, 4]}) + right = DataFrame({"A": [1], "C": [5]}, dtype="int64") + + if left_empty: + left = left.head(0) + else: + right = right.head(0) + + result = left.merge(right, how=how) + + if exp == "left": + expected = DataFrame({"A": [2, 1], "B": [3, 4], "C": [np.nan, np.nan]}) + elif exp == "right": + expected = DataFrame({"A": [1], "B": [np.nan], "C": [5]}) + elif exp == "empty": + expected = DataFrame(columns=["A", "B", "C"], dtype="int64") + elif exp == "empty_cross": + expected = DataFrame(columns=["A_x", "B", "A_y", "C"], dtype="int64") + + if how == "outer": + expected = expected.sort_values("A", ignore_index=True) + + tm.assert_frame_equal(result, expected) + + +@pytest.fixture +def left(): + return DataFrame( + { + "X": Series( + np.random.default_rng(2).choice(["foo", "bar"], size=(10,)) + ).astype(CategoricalDtype(["foo", "bar"])), + "Y": np.random.default_rng(2).choice(["one", "two", "three"], size=(10,)), + } + ) + + +@pytest.fixture +def right(): + return DataFrame( + { + "X": Series(["foo", "bar"]).astype(CategoricalDtype(["foo", "bar"])), + "Z": [1, 2], + } + ) + + +class TestMergeCategorical: + def test_identical(self, left, using_infer_string): + # merging on the same, should preserve dtypes + merged = merge(left, left, on="X") + result = merged.dtypes.sort_index() + dtype = np.dtype("O") if not using_infer_string else "string" + expected = Series( + [CategoricalDtype(categories=["foo", "bar"]), dtype, dtype], + index=["X", "Y_x", "Y_y"], + ) + tm.assert_series_equal(result, expected) + + def test_basic(self, left, right, using_infer_string): + # we have matching Categorical dtypes in X + # so should preserve the merged column + merged = merge(left, right, on="X") + result = merged.dtypes.sort_index() + dtype = np.dtype("O") if not using_infer_string else "string" + expected = Series( + [ + CategoricalDtype(categories=["foo", "bar"]), + dtype, + np.dtype("int64"), + ], + index=["X", "Y", "Z"], + ) + tm.assert_series_equal(result, expected) + + def test_merge_categorical(self): + # GH 9426 + + right = DataFrame( + { + "c": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e"}, + "d": {0: "null", 1: "null", 2: "null", 3: "null", 4: "null"}, + } + ) + left = DataFrame( + { + "a": {0: "f", 1: "f", 2: "f", 3: "f", 4: "f"}, + "b": {0: "g", 1: "g", 2: "g", 3: "g", 4: "g"}, + } + ) + df = merge(left, right, how="left", left_on="b", right_on="c") + + # object-object + expected = df.copy() + + # object-cat + # note that we propagate the category + # because we don't have any matching rows + cright = right.copy() + cright["d"] = cright["d"].astype("category") + result = merge(left, cright, how="left", 
left_on="b", right_on="c") + expected["d"] = expected["d"].astype(CategoricalDtype(["null"])) + tm.assert_frame_equal(result, expected) + + # cat-object + cleft = left.copy() + cleft["b"] = cleft["b"].astype("category") + result = merge(cleft, cright, how="left", left_on="b", right_on="c") + tm.assert_frame_equal(result, expected) + + # cat-cat + cright = right.copy() + cright["d"] = cright["d"].astype("category") + cleft = left.copy() + cleft["b"] = cleft["b"].astype("category") + result = merge(cleft, cright, how="left", left_on="b", right_on="c") + tm.assert_frame_equal(result, expected) + + def tests_merge_categorical_unordered_equal(self): + # GH-19551 + df1 = DataFrame( + { + "Foo": Categorical(["A", "B", "C"], categories=["A", "B", "C"]), + "Left": ["A0", "B0", "C0"], + } + ) + + df2 = DataFrame( + { + "Foo": Categorical(["C", "B", "A"], categories=["C", "B", "A"]), + "Right": ["C1", "B1", "A1"], + } + ) + result = merge(df1, df2, on=["Foo"]) + expected = DataFrame( + { + "Foo": Categorical(["A", "B", "C"]), + "Left": ["A0", "B0", "C0"], + "Right": ["A1", "B1", "C1"], + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_multiindex_merge_with_unordered_categoricalindex(self, ordered): + # GH 36973 + pcat = CategoricalDtype(categories=["P2", "P1"], ordered=ordered) + df1 = DataFrame( + { + "id": ["C", "C", "D"], + "p": Categorical(["P2", "P1", "P2"], dtype=pcat), + "a": [0, 1, 2], + } + ).set_index(["id", "p"]) + df2 = DataFrame( + { + "id": ["A", "C", "C"], + "p": Categorical(["P2", "P2", "P1"], dtype=pcat), + "d1": [10, 11, 12], + } + ).set_index(["id", "p"]) + result = merge(df1, df2, how="left", left_index=True, right_index=True) + expected = DataFrame( + { + "id": ["C", "C", "D"], + "p": Categorical(["P2", "P1", "P2"], dtype=pcat), + "a": [0, 1, 2], + "d1": [11.0, 12.0, np.nan], + } + ).set_index(["id", "p"]) + tm.assert_frame_equal(result, expected) + + def test_other_columns(self, left, right, using_infer_string): + # non-merge columns should preserve if possible + right = right.assign(Z=right.Z.astype("category")) + + merged = merge(left, right, on="X") + result = merged.dtypes.sort_index() + dtype = np.dtype("O") if not using_infer_string else "string" + expected = Series( + [ + CategoricalDtype(categories=["foo", "bar"]), + dtype, + CategoricalDtype(categories=[1, 2]), + ], + index=["X", "Y", "Z"], + ) + tm.assert_series_equal(result, expected) + + # categories are preserved + assert left.X.values._categories_match_up_to_permutation(merged.X.values) + assert right.Z.values._categories_match_up_to_permutation(merged.Z.values) + + @pytest.mark.parametrize( + "change", + [ + lambda x: x, + lambda x: x.astype(CategoricalDtype(["foo", "bar", "bah"])), + lambda x: x.astype(CategoricalDtype(ordered=True)), + ], + ) + def test_dtype_on_merged_different( + self, change, join_type, left, right, using_infer_string + ): + # our merging columns, X now has 2 different dtypes + # so we must be object as a result + + X = change(right.X.astype("object")) + right = right.assign(X=X) + assert isinstance(left.X.values.dtype, CategoricalDtype) + # assert not left.X.values._categories_match_up_to_permutation(right.X.values) + + merged = merge(left, right, on="X", how=join_type) + + result = merged.dtypes.sort_index() + dtype = np.dtype("O") if not using_infer_string else "string" + expected = Series([dtype, dtype, np.dtype("int64")], index=["X", "Y", "Z"]) + tm.assert_series_equal(result, expected) + + def 
test_self_join_multiple_categories(self): + # GH 16767 + # non-duplicates should work with multiple categories + m = 5 + df = DataFrame( + { + "a": ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"] * m, + "b": ["t", "w", "x", "y", "z"] * 2 * m, + "c": [ + letter + for each in ["m", "n", "u", "p", "o"] + for letter in [each] * 2 * m + ], + "d": [ + letter + for each in [ + "aa", + "bb", + "cc", + "dd", + "ee", + "ff", + "gg", + "hh", + "ii", + "jj", + ] + for letter in [each] * m + ], + } + ) + + # change them all to categorical variables + df = df.apply(lambda x: x.astype("category")) + + # self-join should equal ourselves + result = merge(df, df, on=list(df.columns)) + + tm.assert_frame_equal(result, df) + + def test_dtype_on_categorical_dates(self): + # GH 16900 + # dates should not be coerced to ints + + df = DataFrame( + [[date(2001, 1, 1), 1.1], [date(2001, 1, 2), 1.3]], columns=["date", "num2"] + ) + df["date"] = df["date"].astype("category") + + df2 = DataFrame( + [[date(2001, 1, 1), 1.3], [date(2001, 1, 3), 1.4]], columns=["date", "num4"] + ) + df2["date"] = df2["date"].astype("category") + + expected_outer = DataFrame( + [ + [pd.Timestamp("2001-01-01").date(), 1.1, 1.3], + [pd.Timestamp("2001-01-02").date(), 1.3, np.nan], + [pd.Timestamp("2001-01-03").date(), np.nan, 1.4], + ], + columns=["date", "num2", "num4"], + ) + result_outer = merge(df, df2, how="outer", on=["date"]) + tm.assert_frame_equal(result_outer, expected_outer) + + expected_inner = DataFrame( + [[pd.Timestamp("2001-01-01").date(), 1.1, 1.3]], + columns=["date", "num2", "num4"], + ) + result_inner = merge(df, df2, how="inner", on=["date"]) + tm.assert_frame_equal(result_inner, expected_inner) + + @pytest.mark.parametrize("ordered", [True, False]) + @pytest.mark.parametrize( + "category_column,categories,expected_categories", + [ + ([False, True, True, False], [True, False], [True, False]), + ([2, 1, 1, 2], [1, 2], [1, 2]), + (["False", "True", "True", "False"], ["True", "False"], ["True", "False"]), + ], + ) + def test_merging_with_bool_or_int_cateorical_column( + self, category_column, categories, expected_categories, ordered + ): + # GH 17187 + # merging with a boolean/int categorical column + df1 = DataFrame({"id": [1, 2, 3, 4], "cat": category_column}) + df1["cat"] = df1["cat"].astype(CategoricalDtype(categories, ordered=ordered)) + df2 = DataFrame({"id": [2, 4], "num": [1, 9]}) + result = df1.merge(df2) + expected = DataFrame({"id": [2, 4], "cat": expected_categories, "num": [1, 9]}) + expected["cat"] = expected["cat"].astype( + CategoricalDtype(categories, ordered=ordered) + ) + tm.assert_frame_equal(expected, result) + + def test_merge_on_int_array(self): + # GH 23020 + df = DataFrame({"A": Series([1, 2, np.nan], dtype="Int64"), "B": 1}) + result = merge(df, df, on="A") + expected = DataFrame( + {"A": Series([1, 2, np.nan], dtype="Int64"), "B_x": 1, "B_y": 1} + ) + tm.assert_frame_equal(result, expected) + + +@pytest.fixture +def left_df(): + return DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0]) + + +@pytest.fixture +def right_df(): + return DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2]) + + +class TestMergeOnIndexes: + @pytest.mark.parametrize( + "how, sort, expected", + [ + ("inner", False, DataFrame({"a": [20, 10], "b": [200, 100]}, index=[2, 1])), + ("inner", True, DataFrame({"a": [10, 20], "b": [100, 200]}, index=[1, 2])), + ( + "left", + False, + DataFrame({"a": [20, 10, 0], "b": [200, 100, np.nan]}, index=[2, 1, 0]), + ), + ( + "left", + True, + DataFrame({"a": [0, 10, 20], "b": [np.nan, 100, 
200]}, index=[0, 1, 2]), + ), + ( + "right", + False, + DataFrame( + {"a": [np.nan, 10, 20], "b": [300, 100, 200]}, index=[3, 1, 2] + ), + ), + ( + "right", + True, + DataFrame( + {"a": [10, 20, np.nan], "b": [100, 200, 300]}, index=[1, 2, 3] + ), + ), + ( + "outer", + False, + DataFrame( + {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]}, + index=[0, 1, 2, 3], + ), + ), + ( + "outer", + True, + DataFrame( + {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]}, + index=[0, 1, 2, 3], + ), + ), + ], + ) + def test_merge_on_indexes(self, left_df, right_df, how, sort, expected): + result = merge( + left_df, right_df, left_index=True, right_index=True, how=how, sort=sort + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "index", + [Index([1, 2], dtype=dtyp, name="index_col") for dtyp in tm.ALL_REAL_NUMPY_DTYPES] + + [ + CategoricalIndex(["A", "B"], categories=["A", "B"], name="index_col"), + RangeIndex(start=0, stop=2, name="index_col"), + DatetimeIndex(["2018-01-01", "2018-01-02"], name="index_col"), + ], + ids=lambda x: f"{type(x).__name__}[{x.dtype}]", +) +def test_merge_index_types(index): + # gh-20777 + # assert key access is consistent across index types + left = DataFrame({"left_data": [1, 2]}, index=index) + right = DataFrame({"right_data": [1.0, 2.0]}, index=index) + + result = left.merge(right, on=["index_col"]) + + expected = DataFrame({"left_data": [1, 2], "right_data": [1.0, 2.0]}, index=index) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "on,left_on,right_on,left_index,right_index,nm", + [ + (["outer", "inner"], None, None, False, False, "B"), + (None, None, None, True, True, "B"), + (None, ["outer", "inner"], None, False, True, "B"), + (None, None, ["outer", "inner"], True, False, "B"), + (["outer", "inner"], None, None, False, False, None), + (None, None, None, True, True, None), + (None, ["outer", "inner"], None, False, True, None), + (None, None, ["outer", "inner"], True, False, None), + ], +) +def test_merge_series(on, left_on, right_on, left_index, right_index, nm): + # GH 21220 + a = DataFrame( + {"A": [1, 2, 3, 4]}, + index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]), + ) + b = Series( + [1, 2, 3, 4], + index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]), + name=nm, + ) + expected = DataFrame( + {"A": [2, 4], "B": [1, 3]}, + index=MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]), + ) + if nm is not None: + result = merge( + a, + b, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + ) + tm.assert_frame_equal(result, expected) + else: + msg = "Cannot merge a Series without a name" + with pytest.raises(ValueError, match=msg): + result = merge( + a, + b, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + ) + + +def test_merge_series_multilevel(): + # GH#47946 + # GH 40993: For raising, enforced in 2.0 + a = DataFrame( + {"A": [1, 2, 3, 4]}, + index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]), + ) + b = Series( + [1, 2, 3, 4], + index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]), + name=("B", "C"), + ) + with pytest.raises( + MergeError, match="Not allowed to merge between different levels" + ): + merge(a, b, on=["outer", "inner"]) + + +@pytest.mark.parametrize( + "col1, col2, kwargs, expected_cols", + [ + (0, 0, {"suffixes": ("", "_dup")}, ["0", "0_dup"]), + (0, 0, {"suffixes": 
(None, "_dup")}, [0, "0_dup"]), + (0, 0, {"suffixes": ("_x", "_y")}, ["0_x", "0_y"]), + (0, 0, {"suffixes": ["_x", "_y"]}, ["0_x", "0_y"]), + ("a", 0, {"suffixes": (None, "_y")}, ["a", 0]), + (0.0, 0.0, {"suffixes": ("_x", None)}, ["0.0_x", 0.0]), + ("b", "b", {"suffixes": (None, "_y")}, ["b", "b_y"]), + ("a", "a", {"suffixes": ("_x", None)}, ["a_x", "a"]), + ("a", "b", {"suffixes": ("_x", None)}, ["a", "b"]), + ("a", "a", {"suffixes": (None, "_x")}, ["a", "a_x"]), + (0, 0, {"suffixes": ("_a", None)}, ["0_a", 0]), + ("a", "a", {}, ["a_x", "a_y"]), + (0, 0, {}, ["0_x", "0_y"]), + ], +) +def test_merge_suffix(col1, col2, kwargs, expected_cols): + # issue: 24782 + a = DataFrame({col1: [1, 2, 3]}) + b = DataFrame({col2: [4, 5, 6]}) + + expected = DataFrame([[1, 4], [2, 5], [3, 6]], columns=expected_cols) + + result = a.merge(b, left_index=True, right_index=True, **kwargs) + tm.assert_frame_equal(result, expected) + + result = merge(a, b, left_index=True, right_index=True, **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "how,expected", + [ + ( + "right", + DataFrame( + {"A": [100, 200, 300], "B1": [60, 70, np.nan], "B2": [600, 700, 800]} + ), + ), + ( + "outer", + DataFrame( + { + "A": [1, 100, 200, 300], + "B1": [80, 60, 70, np.nan], + "B2": [np.nan, 600, 700, 800], + } + ), + ), + ], +) +def test_merge_duplicate_suffix(how, expected): + left_df = DataFrame({"A": [100, 200, 1], "B": [60, 70, 80]}) + right_df = DataFrame({"A": [100, 200, 300], "B": [600, 700, 800]}) + result = merge(left_df, right_df, on="A", how=how, suffixes=("_x", "_x")) + expected.columns = ["A", "B_x", "B_x"] + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "col1, col2, suffixes", + [("a", "a", (None, None)), ("a", "a", ("", None)), (0, 0, (None, ""))], +) +def test_merge_suffix_error(col1, col2, suffixes): + # issue: 24782 + a = DataFrame({col1: [1, 2, 3]}) + b = DataFrame({col2: [3, 4, 5]}) + + # TODO: might reconsider current raise behaviour, see issue 24782 + msg = "columns overlap but no suffix specified" + with pytest.raises(ValueError, match=msg): + merge(a, b, left_index=True, right_index=True, suffixes=suffixes) + + +@pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}]) +def test_merge_suffix_raises(suffixes): + a = DataFrame({"a": [1, 2, 3]}) + b = DataFrame({"b": [3, 4, 5]}) + + with pytest.raises(TypeError, match="Passing 'suffixes' as a"): + merge(a, b, left_index=True, right_index=True, suffixes=suffixes) + + +@pytest.mark.parametrize( + "col1, col2, suffixes, msg", + [ + ("a", "a", ("a", "b", "c"), r"too many values to unpack \(expected 2\)"), + ("a", "a", tuple("a"), r"not enough values to unpack \(expected 2, got 1\)"), + ], +) +def test_merge_suffix_length_error(col1, col2, suffixes, msg): + a = DataFrame({col1: [1, 2, 3]}) + b = DataFrame({col2: [3, 4, 5]}) + + with pytest.raises(ValueError, match=msg): + merge(a, b, left_index=True, right_index=True, suffixes=suffixes) + + +@pytest.mark.parametrize("cat_dtype", ["one", "two"]) +@pytest.mark.parametrize("reverse", [True, False]) +def test_merge_equal_cat_dtypes(cat_dtype, reverse): + # see gh-22501 + cat_dtypes = { + "one": CategoricalDtype(categories=["a", "b", "c"], ordered=False), + "two": CategoricalDtype(categories=["a", "b", "c"], ordered=False), + } + + df1 = DataFrame( + {"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]), "left": [1, 2, 3]} + ).set_index("foo") + + data_foo = ["a", "b", "c"] + data_right = [1, 2, 3] + + if reverse: + 
data_foo.reverse() + data_right.reverse() + + df2 = DataFrame( + {"foo": Series(data_foo).astype(cat_dtypes[cat_dtype]), "right": data_right} + ).set_index("foo") + + result = df1.merge(df2, left_index=True, right_index=True) + + expected = DataFrame( + { + "left": [1, 2, 3], + "right": [1, 2, 3], + "foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]), + } + ).set_index("foo") + + tm.assert_frame_equal(result, expected) + + +def test_merge_equal_cat_dtypes2(): + # see gh-22501 + cat_dtype = CategoricalDtype(categories=["a", "b", "c"], ordered=False) + + # Test Data + df1 = DataFrame( + {"foo": Series(["a", "b"]).astype(cat_dtype), "left": [1, 2]} + ).set_index("foo") + + df2 = DataFrame( + {"foo": Series(["a", "b", "c"]).astype(cat_dtype), "right": [3, 2, 1]} + ).set_index("foo") + + result = df1.merge(df2, left_index=True, right_index=True) + + expected = DataFrame( + {"left": [1, 2], "right": [3, 2], "foo": Series(["a", "b"]).astype(cat_dtype)} + ).set_index("foo") + + tm.assert_frame_equal(result, expected) + + +def test_merge_on_cat_and_ext_array(): + # GH 28668 + right = DataFrame( + {"a": Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")} + ) + left = right.copy() + left["a"] = left["a"].astype("category") + + result = merge(left, right, how="inner", on="a") + expected = right.copy() + + tm.assert_frame_equal(result, expected) + + +def test_merge_multiindex_columns(): + # Issue #28518 + # Verify that merging two dataframes give the expected labels + # The original cause of this issue come from a bug lexsort_depth and is tested in + # test_lexsort_depth + + letters = ["a", "b", "c", "d"] + numbers = ["1", "2", "3"] + index = MultiIndex.from_product((letters, numbers), names=["outer", "inner"]) + + frame_x = DataFrame(columns=index) + frame_x["id"] = "" + frame_y = DataFrame(columns=index) + frame_y["id"] = "" + + l_suf = "_x" + r_suf = "_y" + result = frame_x.merge(frame_y, on="id", suffixes=((l_suf, r_suf))) + + # Constructing the expected results + tuples = [(letter + l_suf, num) for letter in letters for num in numbers] + tuples += [("id", "")] + tuples += [(letter + r_suf, num) for letter in letters for num in numbers] + + expected_index = MultiIndex.from_tuples(tuples, names=["outer", "inner"]) + expected = DataFrame(columns=expected_index) + + tm.assert_frame_equal(result, expected, check_dtype=False) + + +def test_merge_datetime_upcast_dtype(): + # https://github.com/pandas-dev/pandas/issues/31208 + df1 = DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]}) + df2 = DataFrame( + {"y": ["1", "2", "3"], "z": pd.to_datetime(["2000", "2001", "2002"])} + ) + result = merge(df1, df2, how="left", on="y") + expected = DataFrame( + { + "x": ["a", "b", "c"], + "y": ["1", "2", "4"], + "z": pd.to_datetime(["2000", "2001", "NaT"]), + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("n_categories", [5, 128]) +def test_categorical_non_unique_monotonic(n_categories): + # GH 28189 + # With n_categories as 5, we test the int8 case is hit in libjoin, + # with n_categories as 128 we test the int16 case. 
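The equal-categorical-dtype tests above reduce to one property: identical `CategoricalDtype`s on both sides survive an index merge intact. A sketch mirroring `test_merge_equal_cat_dtypes2`:

```python
import pandas as pd
from pandas import CategoricalDtype

cat = CategoricalDtype(categories=["a", "b", "c"], ordered=False)

df1 = pd.DataFrame(
    {"foo": pd.Series(["a", "b"]).astype(cat), "left": [1, 2]}
).set_index("foo")
df2 = pd.DataFrame(
    {"foo": pd.Series(["a", "b", "c"]).astype(cat), "right": [3, 2, 1]}
).set_index("foo")

# The categorical dtype of the join key survives the index merge.
out = df1.merge(df2, left_index=True, right_index=True)
print(out.index.dtype)  # category
```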
+ left_index = CategoricalIndex([0] + list(range(n_categories))) + df1 = DataFrame(range(n_categories + 1), columns=["value"], index=left_index) + df2 = DataFrame( + [[6]], + columns=["value"], + index=CategoricalIndex([0], categories=list(range(n_categories))), + ) + + result = merge(df1, df2, how="left", left_index=True, right_index=True) + expected = DataFrame( + [[i, 6.0] if i < 2 else [i, np.nan] for i in range(n_categories + 1)], + columns=["value_x", "value_y"], + index=left_index, + ) + tm.assert_frame_equal(expected, result) + + +def test_merge_join_categorical_multiindex(): + # From issue 16627 + a = { + "Cat1": Categorical(["a", "b", "a", "c", "a", "b"], ["a", "b", "c"]), + "Int1": [0, 1, 0, 1, 0, 0], + } + a = DataFrame(a) + + b = { + "Cat": Categorical(["a", "b", "c", "a", "b", "c"], ["a", "b", "c"]), + "Int": [0, 0, 0, 1, 1, 1], + "Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6], + } + b = DataFrame(b).set_index(["Cat", "Int"])["Factor"] + + expected = merge( + a, + b.reset_index(), + left_on=["Cat1", "Int1"], + right_on=["Cat", "Int"], + how="left", + ) + expected = expected.drop(["Cat", "Int"], axis=1) + result = a.join(b, on=["Cat1", "Int1"]) + tm.assert_frame_equal(expected, result) + + # Same test, but with ordered categorical + a = { + "Cat1": Categorical( + ["a", "b", "a", "c", "a", "b"], ["b", "a", "c"], ordered=True + ), + "Int1": [0, 1, 0, 1, 0, 0], + } + a = DataFrame(a) + + b = { + "Cat": Categorical( + ["a", "b", "c", "a", "b", "c"], ["b", "a", "c"], ordered=True + ), + "Int": [0, 0, 0, 1, 1, 1], + "Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6], + } + b = DataFrame(b).set_index(["Cat", "Int"])["Factor"] + + expected = merge( + a, + b.reset_index(), + left_on=["Cat1", "Int1"], + right_on=["Cat", "Int"], + how="left", + ) + expected = expected.drop(["Cat", "Int"], axis=1) + result = a.join(b, on=["Cat1", "Int1"]) + tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize("func", ["merge", "merge_asof"]) +@pytest.mark.parametrize( + ("kwargs", "err_msg"), + [ + ({"left_on": "a", "left_index": True}, ["left_on", "left_index"]), + ({"right_on": "a", "right_index": True}, ["right_on", "right_index"]), + ], +) +def test_merge_join_cols_error_reporting_duplicates(func, kwargs, err_msg): + # GH: 16228 + left = DataFrame({"a": [1, 2], "b": [3, 4]}) + right = DataFrame({"a": [1, 1], "c": [5, 6]}) + msg = rf'Can only pass argument "{err_msg[0]}" OR "{err_msg[1]}" not both\.' + with pytest.raises(MergeError, match=msg): + getattr(pd, func)(left, right, **kwargs) + + +@pytest.mark.parametrize("func", ["merge", "merge_asof"]) +@pytest.mark.parametrize( + ("kwargs", "err_msg"), + [ + ({"left_on": "a"}, ["right_on", "right_index"]), + ({"right_on": "a"}, ["left_on", "left_index"]), + ], +) +def test_merge_join_cols_error_reporting_missing(func, kwargs, err_msg): + # GH: 16228 + left = DataFrame({"a": [1, 2], "b": [3, 4]}) + right = DataFrame({"a": [1, 1], "c": [5, 6]}) + msg = rf'Must pass "{err_msg[0]}" OR "{err_msg[1]}"\.' + with pytest.raises(MergeError, match=msg): + getattr(pd, func)(left, right, **kwargs) + + +@pytest.mark.parametrize("func", ["merge", "merge_asof"]) +@pytest.mark.parametrize( + "kwargs", + [ + {"right_index": True}, + {"left_index": True}, + ], +) +def test_merge_join_cols_error_reporting_on_and_index(func, kwargs): + # GH: 16228 + left = DataFrame({"a": [1, 2], "b": [3, 4]}) + right = DataFrame({"a": [1, 1], "c": [5, 6]}) + msg = ( + r'Can only pass argument "on" OR "left_index" ' + r'and "right_index", not a combination of both\.' 
+ ) + with pytest.raises(MergeError, match=msg): + getattr(pd, func)(left, right, on="a", **kwargs) + + + def test_merge_right_left_index(): + # GH#38616 + left = DataFrame({"x": [1, 1], "z": ["foo", "foo"]}) + right = DataFrame({"x": [1, 1], "z": ["foo", "foo"]}) + result = merge(left, right, how="right", left_index=True, right_on="x") + expected = DataFrame( + { + "x": [1, 1], + "x_x": [1, 1], + "z_x": ["foo", "foo"], + "x_y": [1, 1], + "z_y": ["foo", "foo"], + } + ) + tm.assert_frame_equal(result, expected) + + + def test_merge_result_empty_index_and_on(): + # GH#33814 + df1 = DataFrame({"a": [1], "b": [2]}).set_index(["a", "b"]) + df2 = DataFrame({"b": [1]}).set_index(["b"]) + expected = DataFrame({"a": [], "b": []}, dtype=np.int64).set_index(["a", "b"]) + result = merge(df1, df2, left_on=["b"], right_index=True) + tm.assert_frame_equal(result, expected) + + result = merge(df2, df1, left_index=True, right_on=["b"]) + tm.assert_frame_equal(result, expected) + + + def test_merge_suffixes_produce_dup_columns_raises(): + # GH#22818; Enforced in 2.0 + left = DataFrame({"a": [1, 2, 3], "b": 1, "b_x": 2}) + right = DataFrame({"a": [1, 2, 3], "b": 2}) + + with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"): + merge(left, right, on="a") + + with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"): + merge(right, left, on="a", suffixes=("_y", "_x")) + + + def test_merge_duplicate_columns_with_suffix_no_warning(): + # GH#22818 + # Do not raise a warning when the duplicates are already present in the input + left = DataFrame([[1, 1, 1], [2, 2, 2]], columns=["a", "b", "b"]) + right = DataFrame({"a": [1, 3], "b": 2}) + result = merge(left, right, on="a") + expected = DataFrame([[1, 1, 1, 2]], columns=["a", "b_x", "b_x", "b_y"]) + tm.assert_frame_equal(result, expected) + + + def test_merge_duplicate_columns_with_suffix_causing_another_duplicate_raises(): + # GH#22818; Enforced in 2.0 + # This should raise because the suffixes cause another collision + left = DataFrame([[1, 1, 1, 1], [2, 2, 2, 2]], columns=["a", "b", "b", "b_x"]) + right = DataFrame({"a": [1, 3], "b": 2}) + with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"): + merge(left, right, on="a") + + + def test_merge_string_float_column_result(): + # GH 13353 + df1 = DataFrame([[1, 2], [3, 4]], columns=Index(["a", 114.0])) + df2 = DataFrame([[9, 10], [11, 12]], columns=["x", "y"]) + result = merge(df2, df1, how="inner", left_index=True, right_index=True) + expected = DataFrame( + [[9, 10, 1, 2], [11, 12, 3, 4]], columns=Index(["x", "y", "a", 114.0]) + ) + tm.assert_frame_equal(result, expected) + + + def test_mergeerror_on_left_index_mismatched_dtypes(): + # GH 22449 + df_1 = DataFrame(data=["X"], columns=["C"], index=[22]) + df_2 = DataFrame(data=["X"], columns=["C"], index=[999]) + with pytest.raises(MergeError, match="Can only pass argument"): + merge(df_1, df_2, on=["C"], left_index=True) + + + def test_merge_on_left_categoricalindex(): + # GH#48464 don't raise when left_on is a CategoricalIndex + ci = CategoricalIndex(range(3)) + + right = DataFrame({"A": ci, "B": range(3)}) + left = DataFrame({"C": range(3, 6)}) + + res = merge(left, right, left_on=ci, right_on="A") + expected = merge(left, right, left_on=ci._data, right_on="A") + tm.assert_frame_equal(res, expected) + + + @pytest.mark.parametrize("dtype", [None, "Int64"]) + def test_merge_outer_with_NaN(dtype): + # GH#43550 + left = DataFrame({"key": [1, 2], "col1": [1, 2]}, dtype=dtype) + right = DataFrame({"key":
[np.nan, np.nan], "col2": [3, 4]}, dtype=dtype) + result = merge(left, right, on="key", how="outer") + expected = DataFrame( + { + "key": [1, 2, np.nan, np.nan], + "col1": [1, 2, np.nan, np.nan], + "col2": [np.nan, np.nan, 3, 4], + }, + dtype=dtype, + ) + tm.assert_frame_equal(result, expected) + + # switch left and right + result = merge(right, left, on="key", how="outer") + expected = DataFrame( + { + "key": [1, 2, np.nan, np.nan], + "col2": [np.nan, np.nan, 3, 4], + "col1": [1, 2, np.nan, np.nan], + }, + dtype=dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_different_index_names(): + # GH#45094 + left = DataFrame({"a": [1]}, index=Index([1], name="c")) + right = DataFrame({"a": [1]}, index=Index([1], name="d")) + result = merge(left, right, left_on="c", right_on="d") + expected = DataFrame({"a_x": [1], "a_y": 1}) + tm.assert_frame_equal(result, expected) + + +def test_merge_ea(any_numeric_ea_dtype, join_type): + # GH#44240 + left = DataFrame({"a": [1, 2, 3], "b": 1}, dtype=any_numeric_ea_dtype) + right = DataFrame({"a": [1, 2, 3], "c": 2}, dtype=any_numeric_ea_dtype) + result = left.merge(right, how=join_type) + expected = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2}, dtype=any_numeric_ea_dtype) + tm.assert_frame_equal(result, expected) + + +def test_merge_ea_and_non_ea(any_numeric_ea_dtype, join_type): + # GH#44240 + left = DataFrame({"a": [1, 2, 3], "b": 1}, dtype=any_numeric_ea_dtype) + right = DataFrame({"a": [1, 2, 3], "c": 2}, dtype=any_numeric_ea_dtype.lower()) + result = left.merge(right, how=join_type) + expected = DataFrame( + { + "a": Series([1, 2, 3], dtype=any_numeric_ea_dtype), + "b": Series([1, 1, 1], dtype=any_numeric_ea_dtype), + "c": Series([2, 2, 2], dtype=any_numeric_ea_dtype.lower()), + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["int64", "int64[pyarrow]"]) +def test_merge_arrow_and_numpy_dtypes(dtype): + # GH#52406 + pytest.importorskip("pyarrow") + df = DataFrame({"a": [1, 2]}, dtype=dtype) + df2 = DataFrame({"a": [1, 2]}, dtype="int64[pyarrow]") + result = df.merge(df2) + expected = df.copy() + tm.assert_frame_equal(result, expected) + + result = df2.merge(df) + expected = df2.copy() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("how", ["inner", "left", "outer", "right"]) +@pytest.mark.parametrize("tz", [None, "America/Chicago"]) +def test_merge_datetime_different_resolution(tz, how): + # https://github.com/pandas-dev/pandas/issues/53200 + vals = [ + pd.Timestamp(2023, 5, 12, tz=tz), + pd.Timestamp(2023, 5, 13, tz=tz), + pd.Timestamp(2023, 5, 14, tz=tz), + ] + df1 = DataFrame({"t": vals[:2], "a": [1.0, 2.0]}) + df1["t"] = df1["t"].dt.as_unit("ns") + df2 = DataFrame({"t": vals[1:], "b": [1.0, 2.0]}) + df2["t"] = df2["t"].dt.as_unit("s") + + expected = DataFrame({"t": vals, "a": [1.0, 2.0, np.nan], "b": [np.nan, 1.0, 2.0]}) + expected["t"] = expected["t"].dt.as_unit("ns") + if how == "inner": + expected = expected.iloc[[1]].reset_index(drop=True) + elif how == "left": + expected = expected.iloc[[0, 1]] + elif how == "right": + expected = expected.iloc[[1, 2]].reset_index(drop=True) + + result = df1.merge(df2, on="t", how=how) + tm.assert_frame_equal(result, expected) + + +def test_merge_multiindex_single_level(): + # GH52331 + df = DataFrame({"col": ["A", "B"]}) + df2 = DataFrame( + data={"b": [100]}, + index=MultiIndex.from_tuples([("A",), ("C",)], names=["col"]), + ) + expected = DataFrame({"col": ["A", "B"], "b": [100, np.nan]}) + + result = df.merge(df2, 
left_on=["col"], right_index=True, how="left") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("how", ["left", "right", "inner", "outer"]) +@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("on_index", [True, False]) +@pytest.mark.parametrize("left_unique", [True, False]) +@pytest.mark.parametrize("left_monotonic", [True, False]) +@pytest.mark.parametrize("right_unique", [True, False]) +@pytest.mark.parametrize("right_monotonic", [True, False]) +def test_merge_combinations( + how, sort, on_index, left_unique, left_monotonic, right_unique, right_monotonic +): + # GH 54611 + left = [2, 3] + if left_unique: + left.append(4 if left_monotonic else 1) + else: + left.append(3 if left_monotonic else 2) + + right = [2, 3] + if right_unique: + right.append(4 if right_monotonic else 1) + else: + right.append(3 if right_monotonic else 2) + + left = DataFrame({"key": left}) + right = DataFrame({"key": right}) + + if on_index: + left = left.set_index("key") + right = right.set_index("key") + on_kwargs = {"left_index": True, "right_index": True} + else: + on_kwargs = {"on": "key"} + + result = merge(left, right, how=how, sort=sort, **on_kwargs) + + if on_index: + left = left.reset_index() + right = right.reset_index() + + if how in ["left", "right", "inner"]: + if how in ["left", "inner"]: + expected, other, other_unique = left, right, right_unique + else: + expected, other, other_unique = right, left, left_unique + if how == "inner": + keep_values = set(left["key"].values).intersection(right["key"].values) + keep_mask = expected["key"].isin(keep_values) + expected = expected[keep_mask] + if sort: + expected = expected.sort_values("key") + if not other_unique: + other_value_counts = other["key"].value_counts() + repeats = other_value_counts.reindex(expected["key"].values, fill_value=1) + repeats = repeats.astype(np.intp) + expected = expected["key"].repeat(repeats.values) + expected = expected.to_frame() + elif how == "outer": + left_counts = left["key"].value_counts() + right_counts = right["key"].value_counts() + expected_counts = left_counts.mul(right_counts, fill_value=1) + expected_counts = expected_counts.astype(np.intp) + expected = expected_counts.index.values.repeat(expected_counts.values) + expected = DataFrame({"key": expected}) + expected = expected.sort_values("key") + + if on_index: + expected = expected.set_index("key") + else: + expected = expected.reset_index(drop=True) + + tm.assert_frame_equal(result, expected) + + +def test_merge_ea_int_and_float_numpy(): + # GH#46178 + df1 = DataFrame([1.0, np.nan], dtype=pd.Int64Dtype()) + df2 = DataFrame([1.5]) + expected = DataFrame(columns=[0], dtype="Int64") + + with tm.assert_produces_warning(UserWarning, match="You are merging"): + result = df1.merge(df2) + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(UserWarning, match="You are merging"): + result = df2.merge(df1) + tm.assert_frame_equal(result, expected.astype("float64")) + + df2 = DataFrame([1.0]) + expected = DataFrame([1], columns=[0], dtype="Int64") + result = df1.merge(df2) + tm.assert_frame_equal(result, expected) + + result = df2.merge(df1) + tm.assert_frame_equal(result, expected.astype("float64")) + + +def test_merge_arrow_string_index(any_string_dtype): + # GH#54894 + pytest.importorskip("pyarrow") + left = DataFrame({"a": ["a", "b"]}, dtype=any_string_dtype) + right = DataFrame({"b": 1}, index=Index(["a", "c"], dtype=any_string_dtype)) + result = left.merge(right, left_on="a", right_index=True, 
how="left") + expected = DataFrame( + {"a": Series(["a", "b"], dtype=any_string_dtype), "b": [1, np.nan]} + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("left_empty", [True, False]) +@pytest.mark.parametrize("right_empty", [True, False]) +def test_merge_empty_frames_column_order(left_empty, right_empty): + # GH 51929 + df1 = DataFrame(1, index=[0], columns=["A", "B"]) + df2 = DataFrame(1, index=[0], columns=["A", "C", "D"]) + + if left_empty: + df1 = df1.iloc[:0] + if right_empty: + df2 = df2.iloc[:0] + + result = merge(df1, df2, on=["A"], how="outer") + expected = DataFrame(1, index=[0], columns=["A", "B", "C", "D"]) + if left_empty and right_empty: + expected = expected.iloc[:0] + elif left_empty: + expected["B"] = np.nan + elif right_empty: + expected[["C", "D"]] = np.nan + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("how", ["left", "right", "inner", "outer"]) +def test_merge_datetime_and_timedelta(how): + left = DataFrame({"key": Series([1, None], dtype="datetime64[ns]")}) + right = DataFrame({"key": Series([1], dtype="timedelta64[ns]")}) + + msg = ( + f"You are trying to merge on {left['key'].dtype} and {right['key'].dtype} " + "columns for key 'key'. If you wish to proceed you should use pd.concat" + ) + with pytest.raises(ValueError, match=re.escape(msg)): + left.merge(right, on="key", how=how) + + msg = ( + f"You are trying to merge on {right['key'].dtype} and {left['key'].dtype} " + "columns for key 'key'. If you wish to proceed you should use pd.concat" + ) + with pytest.raises(ValueError, match=re.escape(msg)): + right.merge(left, on="key", how=how) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_asof.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_asof.py new file mode 100644 index 0000000000000000000000000000000000000000..a2e22ea73fd86ef1795298d99404bfdb04be81f2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_asof.py @@ -0,0 +1,3657 @@ +import datetime + +import numpy as np +import pytest +import pytz + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Index, + Timedelta, + merge_asof, + option_context, + to_datetime, +) +import pandas._testing as tm +from pandas.core.reshape.merge import MergeError + + +@pytest.fixture(params=["s", "ms", "us", "ns"]) +def unit(request): + """ + Resolution for datetimelike dtypes. 
+ """ + return request.param + + +class TestAsOfMerge: + def prep_data(self, df, dedupe=False): + if dedupe: + df = df.drop_duplicates(["time", "ticker"], keep="last").reset_index( + drop=True + ) + df.time = to_datetime(df.time) + return df + + @pytest.fixture + def trades(self): + df = pd.DataFrame( + [ + ["20160525 13:30:00.023", "MSFT", "51.9500", "75", "NASDAQ"], + ["20160525 13:30:00.038", "MSFT", "51.9500", "155", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.7700", "100", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9200", "100", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "200", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "300", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "600", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "44", "NASDAQ"], + ["20160525 13:30:00.074", "AAPL", "98.6700", "478343", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6700", "478343", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6600", "6", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "30", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "75", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "20", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "35", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "10", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"], + ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "1000", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "300", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "400", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "600", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "783", "NASDAQ"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"], + ], + columns="time,ticker,price,quantity,marketCenter".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + return self.prep_data(df) + + @pytest.fixture + def quotes(self): + df = pd.DataFrame( + [ + ["20160525 13:30:00.023", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.023", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.041", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.072", "GOOG", "720.50", "720.88"], + ["20160525 13:30:00.075", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.92", "51.95"], + ], + columns="time,ticker,bid,ask".split(","), + ) + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df, dedupe=True) + + @pytest.fixture + def asof(self): + df = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + "51.95", + "51.95", + ], + [ + 
"20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df) + + @pytest.fixture + def tolerance(self): + df = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + 
"20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df) + + @pytest.fixture + def allow_exact_matches(self, datapath): + df = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + np.nan, + np.nan, + 
], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + np.nan, + np.nan, + ], + ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan], + ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.95", + "51.95", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df) + + @pytest.fixture + def allow_exact_matches_and_tolerance(self): + df = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + np.nan, + np.nan, + ], + ["20160525 13:30:00.075", "AAPL", "98.55", "6", 
"ARCA", np.nan, np.nan], + ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.95", + "51.95", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df) + + def test_examples1(self): + """doc-string examples""" + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]} + ) + + result = merge_asof(left, right, on="a") + tm.assert_frame_equal(result, expected) + + def test_examples2(self, unit): + """doc-string examples""" + if unit == "s": + pytest.skip( + "This test is invalid for unit='s' because that would " + "round the trades['time']]" + ) + trades = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.038", + "20160525 13:30:00.048", + "20160525 13:30:00.048", + "20160525 13:30:00.048", + ] + ).astype(f"M8[{unit}]"), + "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + }, + columns=["time", "ticker", "price", "quantity"], + ) + + quotes = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.030", + "20160525 13:30:00.041", + "20160525 13:30:00.048", + "20160525 13:30:00.049", + "20160525 13:30:00.072", + "20160525 13:30:00.075", + ] + ).astype(f"M8[{unit}]"), + "ticker": [ + "GOOG", + "MSFT", + "MSFT", + "MSFT", + "GOOG", + "AAPL", + "GOOG", + "MSFT", + ], + "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], + "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03], + }, + columns=["time", "ticker", "bid", "ask"], + ) + + merge_asof(trades, quotes, on="time", by="ticker") + + merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("2ms")) + + expected = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.038", + "20160525 13:30:00.048", + "20160525 13:30:00.048", + "20160525 13:30:00.048", + ] + ).astype(f"M8[{unit}]"), + "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + "bid": [np.nan, 51.97, np.nan, np.nan, np.nan], + "ask": [np.nan, 51.98, np.nan, np.nan, np.nan], + }, + columns=["time", "ticker", "price", "quantity", 
"bid", "ask"], + ) + + result = merge_asof( + trades, + quotes, + on="time", + by="ticker", + tolerance=Timedelta("10ms"), + allow_exact_matches=False, + ) + tm.assert_frame_equal(result, expected) + + def test_examples3(self): + """doc-string examples""" + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]} + ) + + result = merge_asof(left, right, on="a", direction="forward") + tm.assert_frame_equal(result, expected) + + def test_examples4(self): + """doc-string examples""" + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]} + ) + + result = merge_asof(left, right, on="a", direction="nearest") + tm.assert_frame_equal(result, expected) + + def test_basic(self, trades, asof, quotes): + expected = asof + + result = merge_asof(trades, quotes, on="time", by="ticker") + tm.assert_frame_equal(result, expected) + + def test_basic_categorical(self, trades, asof, quotes): + expected = asof + trades.ticker = trades.ticker.astype("category") + quotes.ticker = quotes.ticker.astype("category") + expected.ticker = expected.ticker.astype("category") + + result = merge_asof(trades, quotes, on="time", by="ticker") + tm.assert_frame_equal(result, expected) + + def test_basic_left_index(self, trades, asof, quotes): + # GH14253 + expected = asof + trades = trades.set_index("time") + + result = merge_asof( + trades, quotes, left_index=True, right_on="time", by="ticker" + ) + # left-only index uses right"s index, oddly + expected.index = result.index + # time column appears after left"s columns + expected = expected[result.columns] + tm.assert_frame_equal(result, expected) + + def test_basic_right_index(self, trades, asof, quotes): + expected = asof + quotes = quotes.set_index("time") + + result = merge_asof( + trades, quotes, left_on="time", right_index=True, by="ticker" + ) + tm.assert_frame_equal(result, expected) + + def test_basic_left_index_right_index(self, trades, asof, quotes): + expected = asof.set_index("time") + trades = trades.set_index("time") + quotes = quotes.set_index("time") + + result = merge_asof( + trades, quotes, left_index=True, right_index=True, by="ticker" + ) + tm.assert_frame_equal(result, expected) + + def test_multi_index_left(self, trades, quotes): + # MultiIndex is prohibited + trades = trades.set_index(["time", "price"]) + quotes = quotes.set_index("time") + with pytest.raises(MergeError, match="left can only have one index"): + merge_asof(trades, quotes, left_index=True, right_index=True) + + def test_multi_index_right(self, trades, quotes): + # MultiIndex is prohibited + trades = trades.set_index("time") + quotes = quotes.set_index(["time", "bid"]) + with pytest.raises(MergeError, match="right can only have one index"): + merge_asof(trades, quotes, left_index=True, right_index=True) + + def test_on_and_index_left_on(self, trades, quotes): + # "on" parameter and index together is prohibited + trades = trades.set_index("time") + quotes = quotes.set_index("time") + msg = 'Can only pass argument "left_on" OR "left_index" not both.' 
+ with pytest.raises(MergeError, match=msg): + merge_asof( + trades, quotes, left_on="price", left_index=True, right_index=True + ) + + def test_on_and_index_right_on(self, trades, quotes): + trades = trades.set_index("time") + quotes = quotes.set_index("time") + msg = 'Can only pass argument "right_on" OR "right_index" not both.' + with pytest.raises(MergeError, match=msg): + merge_asof( + trades, quotes, right_on="bid", left_index=True, right_index=True + ) + + def test_basic_left_by_right_by(self, trades, asof, quotes): + # GH14253 + expected = asof + + result = merge_asof( + trades, quotes, on="time", left_by="ticker", right_by="ticker" + ) + tm.assert_frame_equal(result, expected) + + def test_missing_right_by(self, trades, asof, quotes): + expected = asof + + q = quotes[quotes.ticker != "MSFT"] + result = merge_asof(trades, q, on="time", by="ticker") + expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan + tm.assert_frame_equal(result, expected) + + def test_multiby(self): + # GH13936 + trades = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.046", + "20160525 13:30:00.048", + "20160525 13:30:00.050", + ] + ), + "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + }, + columns=["time", "ticker", "exch", "price", "quantity"], + ) + + quotes = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.030", + "20160525 13:30:00.041", + "20160525 13:30:00.045", + "20160525 13:30:00.049", + ] + ), + "ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"], + "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"], + "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99], + "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01], + }, + columns=["time", "ticker", "exch", "bid", "ask"], + ) + + expected = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.046", + "20160525 13:30:00.048", + "20160525 13:30:00.050", + ] + ), + "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + "bid": [np.nan, 51.95, 720.50, 720.51, np.nan], + "ask": [np.nan, 51.96, 720.93, 720.92, np.nan], + }, + columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"], + ) + + result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["object", "string"]) + def test_multiby_heterogeneous_types(self, dtype): + # GH13936 + trades = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.046", + "20160525 13:30:00.048", + "20160525 13:30:00.050", + ] + ), + "ticker": [0, 0, 1, 1, 2], + "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + }, + columns=["time", "ticker", "exch", "price", "quantity"], + ) + trades = trades.astype({"ticker": dtype, "exch": dtype}) + + quotes = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.030", + "20160525 13:30:00.041", + "20160525 13:30:00.045", + "20160525 13:30:00.049", + ] + ), 
+ "ticker": [1, 0, 0, 0, 1, 2], + "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"], + "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99], + "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01], + }, + columns=["time", "ticker", "exch", "bid", "ask"], + ) + quotes = quotes.astype({"ticker": dtype, "exch": dtype}) + + expected = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.046", + "20160525 13:30:00.048", + "20160525 13:30:00.050", + ] + ), + "ticker": [0, 0, 1, 1, 2], + "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + "bid": [np.nan, 51.95, 720.50, 720.51, np.nan], + "ask": [np.nan, 51.96, 720.93, 720.92, np.nan], + }, + columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"], + ) + expected = expected.astype({"ticker": dtype, "exch": dtype}) + + result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"]) + tm.assert_frame_equal(result, expected) + + def test_mismatched_index_dtype(self): + # similar to test_multiby_indexed, but we change the dtype on left.index + left = pd.DataFrame( + [ + [to_datetime("20160602"), 1, "a"], + [to_datetime("20160602"), 2, "a"], + [to_datetime("20160603"), 1, "b"], + [to_datetime("20160603"), 2, "b"], + ], + columns=["time", "k1", "k2"], + ).set_index("time") + # different dtype for the index + left.index = left.index - pd.Timestamp(0) + + right = pd.DataFrame( + [ + [to_datetime("20160502"), 1, "a", 1.0], + [to_datetime("20160502"), 2, "a", 2.0], + [to_datetime("20160503"), 1, "b", 3.0], + [to_datetime("20160503"), 2, "b", 4.0], + ], + columns=["time", "k1", "k2", "value"], + ).set_index("time") + + msg = "incompatible merge keys" + with pytest.raises(MergeError, match=msg): + merge_asof(left, right, left_index=True, right_index=True, by=["k1", "k2"]) + + def test_multiby_indexed(self): + # GH15676 + left = pd.DataFrame( + [ + [to_datetime("20160602"), 1, "a"], + [to_datetime("20160602"), 2, "a"], + [to_datetime("20160603"), 1, "b"], + [to_datetime("20160603"), 2, "b"], + ], + columns=["time", "k1", "k2"], + ).set_index("time") + + right = pd.DataFrame( + [ + [to_datetime("20160502"), 1, "a", 1.0], + [to_datetime("20160502"), 2, "a", 2.0], + [to_datetime("20160503"), 1, "b", 3.0], + [to_datetime("20160503"), 2, "b", 4.0], + ], + columns=["time", "k1", "k2", "value"], + ).set_index("time") + + expected = pd.DataFrame( + [ + [to_datetime("20160602"), 1, "a", 1.0], + [to_datetime("20160602"), 2, "a", 2.0], + [to_datetime("20160603"), 1, "b", 3.0], + [to_datetime("20160603"), 2, "b", 4.0], + ], + columns=["time", "k1", "k2", "value"], + ).set_index("time") + + result = merge_asof( + left, right, left_index=True, right_index=True, by=["k1", "k2"] + ) + + tm.assert_frame_equal(expected, result) + + with pytest.raises( + MergeError, match="left_by and right_by must be the same length" + ): + merge_asof( + left, + right, + left_index=True, + right_index=True, + left_by=["k1", "k2"], + right_by=["k1"], + ) + + def test_basic2(self, datapath): + expected = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + 
[ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.084", + "AAPL", + "98.64", + "40", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.084", + "AAPL", + "98.55", + "149", + "EDGX", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.086", + "AAPL", + "98.56", + "500", + "ARCA", + "98.55", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "647", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "300", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "50", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "50", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "70", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "70", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "1", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "62", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "10", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "100", + "ARCA", + "98.62", + 
"98.63", + ], + [ + "20160525 13:30:00.105", + "AAPL", + "98.63", + "100", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.105", + "AAPL", + "98.63", + "700", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.106", + "AAPL", + "98.63", + "61", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.107", + "AAPL", + "98.63", + "100", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.107", + "AAPL", + "98.63", + "53", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.108", + "AAPL", + "98.63", + "100", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.108", + "AAPL", + "98.63", + "839", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.115", + "AAPL", + "98.63", + "5", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.118", + "AAPL", + "98.63", + "295", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.118", + "AAPL", + "98.63", + "5", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.128", + "AAPL", + "98.63", + "100", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.128", + "AAPL", + "98.63", + "100", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.128", + "MSFT", + "51.92", + "100", + "ARCA", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "100", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "10", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "59", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "31", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "69", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "12", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "12", + "EDGX", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "100", + "ARCA", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "100", + "ARCA", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.130", + "MSFT", + "51.95", + "317", + "ARCA", + "51.93", + "51.95", + ], + [ + "20160525 13:30:00.130", + "MSFT", + "51.95", + "283", + "ARCA", + "51.93", + "51.95", + ], + [ + "20160525 13:30:00.135", + "MSFT", + "51.93", + "100", + "EDGX", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.135", + "AAPL", + "98.62", + "100", + "ARCA", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "12", + "NASDAQ", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "88", + "NASDAQ", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "162", + "NASDAQ", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.61", + "100", + "BATS", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "61", + "ARCA", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "25", + "ARCA", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "14", + "ARCA", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.145", + "AAPL", + "98.62", + "12", + "ARCA", + "98.6", + "98.63", + ], + [ + "20160525 13:30:00.145", + "AAPL", + "98.62", + "100", + "ARCA", + "98.6", + "98.63", + ], + [ + "20160525 13:30:00.145", + "AAPL", + "98.63", + "100", + "NASDAQ", + "98.6", + "98.63", + ], + [ + 
"20160525 13:30:00.145", + "AAPL", + "98.63", + "100", + "NASDAQ", + "98.6", + "98.63", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + expected["price"] = expected["price"].astype("float64") + expected["quantity"] = expected["quantity"].astype("int64") + expected["bid"] = expected["bid"].astype("float64") + expected["ask"] = expected["ask"].astype("float64") + expected = self.prep_data(expected) + + trades = pd.DataFrame( + [ + ["20160525 13:30:00.023", "MSFT", "51.9500", "75", "NASDAQ"], + ["20160525 13:30:00.038", "MSFT", "51.9500", "155", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.7700", "100", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9200", "100", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "200", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "300", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "600", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "44", "NASDAQ"], + ["20160525 13:30:00.074", "AAPL", "98.6700", "478343", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6700", "478343", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6600", "6", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "30", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "75", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "20", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "35", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "10", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"], + ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "1000", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "300", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "400", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "600", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "783", "NASDAQ"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"], + ["20160525 13:30:00.084", "AAPL", "98.6400", "40", "NASDAQ"], + ["20160525 13:30:00.084", "AAPL", "98.5500", "149", "EDGX"], + ["20160525 13:30:00.086", "AAPL", "98.5600", "500", "ARCA"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "647", "EDGX"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "300", "EDGX"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "50", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "50", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "70", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "70", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "1", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "62", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "10", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "100", "ARCA"], + ["20160525 13:30:00.105", "AAPL", "98.6300", "100", "ARCA"], + ["20160525 13:30:00.105", "AAPL", "98.6300", "700", "ARCA"], + ["20160525 13:30:00.106", "AAPL", "98.6300", "61", "EDGX"], + ["20160525 13:30:00.107", "AAPL", "98.6300", "100", "ARCA"], + ["20160525 13:30:00.107", "AAPL", "98.6300", "53", "ARCA"], + ["20160525 13:30:00.108", "AAPL", "98.6300", "100", "ARCA"], + ["20160525 13:30:00.108", "AAPL", "98.6300", "839", "ARCA"], + ["20160525 13:30:00.115", "AAPL", "98.6300", "5", "EDGX"], + ["20160525 
13:30:00.118", "AAPL", "98.6300", "295", "EDGX"], + ["20160525 13:30:00.118", "AAPL", "98.6300", "5", "EDGX"], + ["20160525 13:30:00.128", "AAPL", "98.6300", "100", "NASDAQ"], + ["20160525 13:30:00.128", "AAPL", "98.6300", "100", "NASDAQ"], + ["20160525 13:30:00.128", "MSFT", "51.9200", "100", "ARCA"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "100", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "10", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "59", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "31", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "69", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "12", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "12", "EDGX"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "100", "ARCA"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "100", "ARCA"], + ["20160525 13:30:00.130", "MSFT", "51.9500", "317", "ARCA"], + ["20160525 13:30:00.130", "MSFT", "51.9500", "283", "ARCA"], + ["20160525 13:30:00.135", "MSFT", "51.9300", "100", "EDGX"], + ["20160525 13:30:00.135", "AAPL", "98.6200", "100", "ARCA"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "12", "NASDAQ"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "88", "NASDAQ"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "162", "NASDAQ"], + ["20160525 13:30:00.144", "AAPL", "98.6100", "100", "BATS"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "61", "ARCA"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "25", "ARCA"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "14", "ARCA"], + ["20160525 13:30:00.145", "AAPL", "98.6200", "12", "ARCA"], + ["20160525 13:30:00.145", "AAPL", "98.6200", "100", "ARCA"], + ["20160525 13:30:00.145", "AAPL", "98.6300", "100", "NASDAQ"], + ["20160525 13:30:00.145", "AAPL", "98.6300", "100", "NASDAQ"], + ], + columns="time,ticker,price,quantity,marketCenter".split(","), + ) + trades["price"] = trades["price"].astype("float64") + trades["quantity"] = trades["quantity"].astype("int64") + trades = self.prep_data(trades) + + quotes = pd.DataFrame( + [ + ["20160525 13:30:00.023", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.023", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.041", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.072", "GOOG", "720.50", "720.88"], + ["20160525 13:30:00.075", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.92", "51.95"], + ["20160525 13:30:00.079", "MSFT", "51.92", "51.95"], + ["20160525 13:30:00.080", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.084", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.086", "AAPL", "98.55", "98.63"], + ["20160525 13:30:00.088", "AAPL", "98.65", "98.63"], + ["20160525 13:30:00.089", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", 
"98.62", "98.63"], + ["20160525 13:30:00.105", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.107", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.115", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.115", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.118", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.128", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.128", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.130", "MSFT", "51.93", "51.95"], + ["20160525 13:30:00.130", "MSFT", "51.93", "51.95"], + ["20160525 13:30:00.130", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.131", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.131", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.135", "MSFT", "51.92", "51.95"], + ["20160525 13:30:00.135", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.136", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.136", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.144", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.144", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.145", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.145", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.145", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.145", "AAPL", "98.60", "98.63"], + ["20160525 13:30:00.145", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.145", "AAPL", "98.60", "98.63"], + ], + columns="time,ticker,bid,ask".split(","), + ) + quotes["bid"] = quotes["bid"].astype("float64") + quotes["ask"] = quotes["ask"].astype("float64") + quotes = self.prep_data(quotes, dedupe=True) + + result = merge_asof(trades, quotes, on="time", by="ticker") + tm.assert_frame_equal(result, expected) + + def test_basic_no_by(self, trades, asof, quotes): + f = ( + lambda x: x[x.ticker == "MSFT"] + .drop("ticker", axis=1) + .reset_index(drop=True) + ) + + # just use a single ticker + expected = f(asof) + trades = f(trades) + quotes = f(quotes) + + result = merge_asof(trades, quotes, on="time") + tm.assert_frame_equal(result, expected) + + def test_valid_join_keys(self, trades, quotes): + msg = r"incompatible merge keys \[1\] .* must be the same type" + + with pytest.raises(MergeError, match=msg): + merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker") + + with pytest.raises(MergeError, match="can only asof on a key for left"): + merge_asof(trades, quotes, on=["time", "ticker"], by="ticker") + + with pytest.raises(MergeError, match="can only asof on a key for left"): + merge_asof(trades, quotes, by="ticker") + + def test_with_duplicates(self, datapath, trades, quotes, asof): + q = ( + pd.concat([quotes, quotes]) + .sort_values(["time", "ticker"]) + .reset_index(drop=True) + ) + result = merge_asof(trades, q, on="time", by="ticker") + expected = self.prep_data(asof) + tm.assert_frame_equal(result, expected) + + def test_with_duplicates_no_on(self): + df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]}) + df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]}) + result = merge_asof(df1, df2, on="key") + expected = pd.DataFrame( + {"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]} + ) + tm.assert_frame_equal(result, expected) + + def test_valid_allow_exact_matches(self, trades, quotes): + msg = "allow_exact_matches must be boolean, passed foo" + + with 
pytest.raises(MergeError, match=msg): + merge_asof( + trades, quotes, on="time", by="ticker", allow_exact_matches="foo" + ) + + def test_valid_tolerance(self, trades, quotes): + # dti + merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s")) + + # integer + merge_asof( + trades.reset_index(), + quotes.reset_index(), + on="index", + by="ticker", + tolerance=1, + ) + + msg = r"incompatible tolerance .*, must be compat with type .*" + + # incompat + with pytest.raises(MergeError, match=msg): + merge_asof(trades, quotes, on="time", by="ticker", tolerance=1) + + # invalid + with pytest.raises(MergeError, match=msg): + merge_asof( + trades.reset_index(), + quotes.reset_index(), + on="index", + by="ticker", + tolerance=1.0, + ) + + msg = "tolerance must be positive" + + # invalid negative + with pytest.raises(MergeError, match=msg): + merge_asof( + trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s") + ) + + with pytest.raises(MergeError, match=msg): + merge_asof( + trades.reset_index(), + quotes.reset_index(), + on="index", + by="ticker", + tolerance=-1, + ) + + def test_non_sorted(self, trades, quotes): + trades = trades.sort_values("time", ascending=False) + quotes = quotes.sort_values("time", ascending=False) + + # we require that we are already sorted on time & quotes + assert not trades.time.is_monotonic_increasing + assert not quotes.time.is_monotonic_increasing + with pytest.raises(ValueError, match="left keys must be sorted"): + merge_asof(trades, quotes, on="time", by="ticker") + + trades = trades.sort_values("time") + assert trades.time.is_monotonic_increasing + assert not quotes.time.is_monotonic_increasing + with pytest.raises(ValueError, match="right keys must be sorted"): + merge_asof(trades, quotes, on="time", by="ticker") + + quotes = quotes.sort_values("time") + assert trades.time.is_monotonic_increasing + assert quotes.time.is_monotonic_increasing + + # ok, though has dupes + merge_asof(trades, quotes, on="time", by="ticker") + + @pytest.mark.parametrize( + "tolerance_ts", + [Timedelta("1day"), datetime.timedelta(days=1)], + ids=["Timedelta", "datetime.timedelta"], + ) + def test_tolerance(self, tolerance_ts, trades, quotes, tolerance): + result = merge_asof( + trades, quotes, on="time", by="ticker", tolerance=tolerance_ts + ) + expected = tolerance + tm.assert_frame_equal(result, expected) + + def test_tolerance_forward(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]} + ) + + result = merge_asof(left, right, on="a", direction="forward", tolerance=1) + tm.assert_frame_equal(result, expected) + + def test_tolerance_nearest(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]} + ) + + result = merge_asof(left, right, on="a", direction="nearest", tolerance=1) + tm.assert_frame_equal(result, expected) + + def test_tolerance_tz(self, unit): + # GH 14844 + left = pd.DataFrame( + { + "date": pd.date_range( + start=to_datetime("2016-01-02"), + freq="D", + periods=5, + tz=pytz.timezone("UTC"), + unit=unit, + ), + "value1": np.arange(5), + } + ) + right = pd.DataFrame( + { + "date": pd.date_range( + 
start=to_datetime("2016-01-01"), + freq="D", + periods=5, + tz=pytz.timezone("UTC"), + unit=unit, + ), + "value2": list("ABCDE"), + } + ) + result = merge_asof(left, right, on="date", tolerance=Timedelta("1 day")) + + expected = pd.DataFrame( + { + "date": pd.date_range( + start=to_datetime("2016-01-02"), + freq="D", + periods=5, + tz=pytz.timezone("UTC"), + unit=unit, + ), + "value1": np.arange(5), + "value2": list("BCDEE"), + } + ) + tm.assert_frame_equal(result, expected) + + def test_tolerance_float(self): + # GH22981 + left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame( + {"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]} + ) + + expected = pd.DataFrame( + { + "a": [1.1, 3.5, 10.9], + "left_val": ["a", "b", "c"], + "right_val": [1, 3.3, np.nan], + } + ) + + result = merge_asof(left, right, on="a", direction="nearest", tolerance=0.5) + tm.assert_frame_equal(result, expected) + + def test_index_tolerance(self, trades, quotes, tolerance): + # GH 15135 + expected = tolerance.set_index("time") + trades = trades.set_index("time") + quotes = quotes.set_index("time") + + result = merge_asof( + trades, + quotes, + left_index=True, + right_index=True, + by="ticker", + tolerance=Timedelta("1day"), + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches(self, trades, quotes, allow_exact_matches): + result = merge_asof( + trades, quotes, on="time", by="ticker", allow_exact_matches=False + ) + expected = allow_exact_matches + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_forward(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]} + ) + + result = merge_asof( + left, right, on="a", direction="forward", allow_exact_matches=False + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_nearest(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]} + ) + + result = merge_asof( + left, right, on="a", direction="nearest", allow_exact_matches=False + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance( + self, trades, quotes, allow_exact_matches_and_tolerance + ): + result = merge_asof( + trades, + quotes, + on="time", + by="ticker", + tolerance=Timedelta("100ms"), + allow_exact_matches=False, + ) + expected = allow_exact_matches_and_tolerance + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance2(self): + # GH 13695 + df1 = pd.DataFrame( + {"time": to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]} + ) + df2 = pd.DataFrame( + { + "time": to_datetime( + ["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"] + ), + "version": [1, 2], + } + ) + + result = merge_asof(df1, df2, on="time") + expected = pd.DataFrame( + { + "time": to_datetime(["2016-07-15 13:30:00.030"]), + "username": ["bob"], + "version": [2], + } + ) + tm.assert_frame_equal(result, expected) + + result = merge_asof(df1, df2, on="time", allow_exact_matches=False) + expected = pd.DataFrame( + { + "time": to_datetime(["2016-07-15 13:30:00.030"]), + "username": ["bob"], + "version": [1], + } 
+ ) + tm.assert_frame_equal(result, expected) + + result = merge_asof( + df1, + df2, + on="time", + allow_exact_matches=False, + tolerance=Timedelta("10ms"), + ) + expected = pd.DataFrame( + { + "time": to_datetime(["2016-07-15 13:30:00.030"]), + "username": ["bob"], + "version": [np.nan], + } + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance3(self): + # GH 13709 + df1 = pd.DataFrame( + { + "time": to_datetime( + ["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"] + ), + "username": ["bob", "charlie"], + } + ) + df2 = pd.DataFrame( + { + "time": to_datetime( + ["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"] + ), + "version": [1, 2], + } + ) + + result = merge_asof( + df1, + df2, + on="time", + allow_exact_matches=False, + tolerance=Timedelta("10ms"), + ) + expected = pd.DataFrame( + { + "time": to_datetime( + ["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"] + ), + "username": ["bob", "charlie"], + "version": [np.nan, np.nan], + } + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance_forward(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]} + ) + + result = merge_asof( + left, + right, + on="a", + direction="forward", + allow_exact_matches=False, + tolerance=1, + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance_nearest(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]} + ) + + result = merge_asof( + left, + right, + on="a", + direction="nearest", + allow_exact_matches=False, + tolerance=1, + ) + tm.assert_frame_equal(result, expected) + + def test_forward_by(self): + # GH14887 + + left = pd.DataFrame( + { + "a": [1, 5, 10, 12, 15], + "b": ["X", "X", "Y", "Z", "Y"], + "left_val": ["a", "b", "c", "d", "e"], + } + ) + right = pd.DataFrame( + { + "a": [1, 6, 11, 15, 16], + "b": ["X", "Z", "Y", "Z", "Y"], + "right_val": [1, 6, 11, 15, 16], + } + ) + + expected = pd.DataFrame( + { + "a": [1, 5, 10, 12, 15], + "b": ["X", "X", "Y", "Z", "Y"], + "left_val": ["a", "b", "c", "d", "e"], + "right_val": [1, np.nan, 11, 15, 16], + } + ) + + result = merge_asof(left, right, on="a", by="b", direction="forward") + tm.assert_frame_equal(result, expected) + + def test_nearest_by(self): + # GH14887 + + left = pd.DataFrame( + { + "a": [1, 5, 10, 12, 15], + "b": ["X", "X", "Z", "Z", "Y"], + "left_val": ["a", "b", "c", "d", "e"], + } + ) + right = pd.DataFrame( + { + "a": [1, 6, 11, 15, 16], + "b": ["X", "Z", "Z", "Z", "Y"], + "right_val": [1, 6, 11, 15, 16], + } + ) + + expected = pd.DataFrame( + { + "a": [1, 5, 10, 12, 15], + "b": ["X", "X", "Z", "Z", "Y"], + "left_val": ["a", "b", "c", "d", "e"], + "right_val": [1, 1, 11, 11, 16], + } + ) + + result = merge_asof(left, right, on="a", by="b", direction="nearest") + tm.assert_frame_equal(result, expected) + + def test_by_int(self): + # we specialize by type, so test that this is correct + df1 = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.020", + "20160525 13:30:00.030", + "20160525 13:30:00.040", + "20160525 13:30:00.050", + "20160525 13:30:00.060", + ] + ), 
+ "key": [1, 2, 1, 3, 2], + "value1": [1.1, 1.2, 1.3, 1.4, 1.5], + }, + columns=["time", "key", "value1"], + ) + + df2 = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.015", + "20160525 13:30:00.020", + "20160525 13:30:00.025", + "20160525 13:30:00.035", + "20160525 13:30:00.040", + "20160525 13:30:00.055", + "20160525 13:30:00.060", + "20160525 13:30:00.065", + ] + ), + "key": [2, 1, 1, 3, 2, 1, 2, 3], + "value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8], + }, + columns=["time", "key", "value2"], + ) + + result = merge_asof(df1, df2, on="time", by="key") + + expected = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.020", + "20160525 13:30:00.030", + "20160525 13:30:00.040", + "20160525 13:30:00.050", + "20160525 13:30:00.060", + ] + ), + "key": [1, 2, 1, 3, 2], + "value1": [1.1, 1.2, 1.3, 1.4, 1.5], + "value2": [2.2, 2.1, 2.3, 2.4, 2.7], + }, + columns=["time", "key", "value1", "value2"], + ) + + tm.assert_frame_equal(result, expected) + + def test_on_float(self): + # mimics how to determine the minimum-price variation + df1 = pd.DataFrame( + { + "price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078], + "symbol": list("ABCDEFG"), + }, + columns=["symbol", "price"], + ) + + df2 = pd.DataFrame( + {"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]}, + columns=["price", "mpv"], + ) + + df1 = df1.sort_values("price").reset_index(drop=True) + + result = merge_asof(df1, df2, on="price") + + expected = pd.DataFrame( + { + "symbol": list("BGACEDF"), + "price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90], + "mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05], + }, + columns=["symbol", "price", "mpv"], + ) + + tm.assert_frame_equal(result, expected) + + def test_on_specialized_type(self, any_real_numpy_dtype): + # see gh-13936 + dtype = np.dtype(any_real_numpy_dtype).type + + df1 = pd.DataFrame( + {"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")}, + columns=["symbol", "value"], + ) + df1.value = dtype(df1.value) + + df2 = pd.DataFrame( + {"value": [0, 80, 120, 125], "result": list("xyzw")}, + columns=["value", "result"], + ) + df2.value = dtype(df2.value) + + df1 = df1.sort_values("value").reset_index(drop=True) + result = merge_asof(df1, df2, on="value") + + expected = pd.DataFrame( + { + "symbol": list("BACEGDF"), + "value": [2, 5, 25, 78, 79, 100, 120], + "result": list("xxxxxyz"), + }, + columns=["symbol", "value", "result"], + ) + expected.value = dtype(expected.value) + + tm.assert_frame_equal(result, expected) + + def test_on_specialized_type_by_int(self, any_real_numpy_dtype): + # see gh-13936 + dtype = np.dtype(any_real_numpy_dtype).type + + df1 = pd.DataFrame( + { + "value": [5, 2, 25, 100, 78, 120, 79], + "key": [1, 2, 3, 2, 3, 1, 2], + "symbol": list("ABCDEFG"), + }, + columns=["symbol", "key", "value"], + ) + df1.value = dtype(df1.value) + + df2 = pd.DataFrame( + {"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")}, + columns=["value", "key", "result"], + ) + df2.value = dtype(df2.value) + + df1 = df1.sort_values("value").reset_index(drop=True) + result = merge_asof(df1, df2, on="value", by="key") + + expected = pd.DataFrame( + { + "symbol": list("BACEGDF"), + "key": [2, 1, 3, 3, 2, 2, 1], + "value": [2, 5, 25, 78, 79, 100, 120], + "result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"], + }, + columns=["symbol", "key", "value", "result"], + ) + expected.value = dtype(expected.value) + + tm.assert_frame_equal(result, expected) + + def test_on_float_by_int(self): + # type specialize 
both "by" and "on" parameters + df1 = pd.DataFrame( + { + "symbol": list("AAABBBCCC"), + "exch": [1, 2, 3, 1, 2, 3, 1, 2, 3], + "price": [ + 3.26, + 3.2599, + 3.2598, + 12.58, + 12.59, + 12.5, + 378.15, + 378.2, + 378.25, + ], + }, + columns=["symbol", "exch", "price"], + ) + + df2 = pd.DataFrame( + { + "exch": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0], + "mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0], + }, + columns=["exch", "price", "mpv"], + ) + + df1 = df1.sort_values("price").reset_index(drop=True) + df2 = df2.sort_values("price").reset_index(drop=True) + + result = merge_asof(df1, df2, on="price", by="exch") + + expected = pd.DataFrame( + { + "symbol": list("AAABBBCCC"), + "exch": [3, 2, 1, 3, 1, 2, 1, 2, 3], + "price": [ + 3.2598, + 3.2599, + 3.26, + 12.5, + 12.58, + 12.59, + 378.15, + 378.2, + 378.25, + ], + "mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25], + }, + columns=["symbol", "exch", "price", "mpv"], + ) + + tm.assert_frame_equal(result, expected) + + def test_merge_datatype_error_raises(self, using_infer_string): + if using_infer_string: + msg = "incompatible merge keys" + else: + msg = r"Incompatible merge dtype, .*, both sides must have numeric dtype" + + left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]}) + right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]}) + + with pytest.raises(MergeError, match=msg): + merge_asof(left, right, on="a") + + def test_merge_datatype_categorical_error_raises(self): + msg = ( + r"incompatible merge keys \[0\] .* both sides category, " + "but not equal ones" + ) + + left = pd.DataFrame( + {"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])} + ) + right = pd.DataFrame( + { + "right_val": [1, 2, 3, 6, 7], + "a": pd.Categorical(["a", "X", "c", "X", "b"]), + } + ) + + with pytest.raises(MergeError, match=msg): + merge_asof(left, right, on="a") + + def test_merge_groupby_multiple_column_with_categorical_column(self): + # GH 16454 + df = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])}) + result = merge_asof(df, df, on="x", by=["y", "z"]) + expected = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "func", [lambda x: x, lambda x: to_datetime(x)], ids=["numeric", "datetime"] + ) + @pytest.mark.parametrize("side", ["left", "right"]) + def test_merge_on_nans(self, func, side): + # GH 23189 + msg = f"Merge keys contain null values on {side} side" + nulls = func([1.0, 5.0, np.nan]) + non_nulls = func([1.0, 5.0, 10.0]) + df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]}) + df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]}) + + with pytest.raises(ValueError, match=msg): + if side == "left": + merge_asof(df_null, df, on="a") + else: + merge_asof(df, df_null, on="a") + + def test_by_nullable(self, any_numeric_ea_dtype, using_infer_string): + # Note: this test passes if instead of using pd.array we use + # np.array([np.nan, 1]). Other than that, I (@jbrockmendel) + # have NO IDEA what the expected behavior is. + # TODO(GH#32306): may be relevant to the expected behavior here. + + arr = pd.array([pd.NA, 0, 1], dtype=any_numeric_ea_dtype) + if arr.dtype.kind in ["i", "u"]: + max_val = np.iinfo(arr.dtype.numpy_dtype).max + else: + max_val = np.finfo(arr.dtype.numpy_dtype).max + # set value s.t. 
(at least for integer dtypes) arr._values_for_argsort + # is not an injection + arr[2] = max_val + + left = pd.DataFrame( + { + "by_col1": arr, + "by_col2": ["HELLO", "To", "You"], + "on_col": [2, 4, 6], + "value": ["a", "c", "e"], + } + ) + right = pd.DataFrame( + { + "by_col1": arr, + "by_col2": ["WORLD", "Wide", "Web"], + "on_col": [1, 2, 6], + "value": ["b", "d", "f"], + } + ) + + result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col") + expected = pd.DataFrame( + { + "by_col1": arr, + "by_col2": ["HELLO", "To", "You"], + "on_col": [2, 4, 6], + "value_x": ["a", "c", "e"], + } + ) + expected["value_y"] = np.array([np.nan, np.nan, np.nan], dtype=object) + if using_infer_string: + expected["value_y"] = expected["value_y"].astype("string[pyarrow_numpy]") + tm.assert_frame_equal(result, expected) + + def test_merge_by_col_tz_aware(self): + # GH 21184 + left = pd.DataFrame( + { + "by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "on_col": [2], + "values": ["a"], + } + ) + right = pd.DataFrame( + { + "by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "on_col": [1], + "values": ["b"], + } + ) + result = merge_asof(left, right, by="by_col", on="on_col") + expected = pd.DataFrame( + [[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]], + columns=["by_col", "on_col", "values_x", "values_y"], + ) + tm.assert_frame_equal(result, expected) + + def test_by_mixed_tz_aware(self, using_infer_string): + # GH 26649 + left = pd.DataFrame( + { + "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "by_col2": ["HELLO"], + "on_col": [2], + "value": ["a"], + } + ) + right = pd.DataFrame( + { + "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "by_col2": ["WORLD"], + "on_col": [1], + "value": ["b"], + } + ) + result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col") + expected = pd.DataFrame( + [[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]], + columns=["by_col1", "by_col2", "on_col", "value_x"], + ) + expected["value_y"] = np.array([np.nan], dtype=object) + if using_infer_string: + expected["value_y"] = expected["value_y"].astype("string[pyarrow_numpy]") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["float64", "int16", "m8[ns]", "M8[us]"]) + def test_by_dtype(self, dtype): + # GH 55453, GH 22794 + left = pd.DataFrame( + { + "by_col": np.array([1], dtype=dtype), + "on_col": [2], + "value": ["a"], + } + ) + right = pd.DataFrame( + { + "by_col": np.array([1], dtype=dtype), + "on_col": [1], + "value": ["b"], + } + ) + result = merge_asof(left, right, by="by_col", on="on_col") + expected = pd.DataFrame( + { + "by_col": np.array([1], dtype=dtype), + "on_col": [2], + "value_x": ["a"], + "value_y": ["b"], + } + ) + tm.assert_frame_equal(result, expected) + + def test_timedelta_tolerance_nearest(self, unit): + # GH 27642 + if unit == "s": + pytest.skip( + "This test is invalid with unit='s' because that would " + "round left['time']" + ) + + left = pd.DataFrame( + list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])), + columns=["time", "left"], + ) + + left["time"] = pd.to_timedelta(left["time"], "ms").astype(f"m8[{unit}]") + + right = pd.DataFrame( + list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])), + columns=["time", "right"], + ) + + right["time"] = pd.to_timedelta(right["time"], "ms").astype(f"m8[{unit}]") + + expected = pd.DataFrame( + list( + zip( + [0, 5, 10, 15, 20, 25], + [0, 1, 2, 3, 4, 5], + [0, np.nan, 2, 4, np.nan, np.nan], + ) + ), + columns=["time", "left", 
"right"], + ) + + expected["time"] = pd.to_timedelta(expected["time"], "ms").astype(f"m8[{unit}]") + + result = merge_asof( + left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest" + ) + + tm.assert_frame_equal(result, expected) + + def test_int_type_tolerance(self, any_int_dtype): + # GH #28870 + + left = pd.DataFrame({"a": [0, 10, 20], "left_val": [1, 2, 3]}) + right = pd.DataFrame({"a": [5, 15, 25], "right_val": [1, 2, 3]}) + left["a"] = left["a"].astype(any_int_dtype) + right["a"] = right["a"].astype(any_int_dtype) + + expected = pd.DataFrame( + {"a": [0, 10, 20], "left_val": [1, 2, 3], "right_val": [np.nan, 1.0, 2.0]} + ) + expected["a"] = expected["a"].astype(any_int_dtype) + + result = merge_asof(left, right, on="a", tolerance=10) + tm.assert_frame_equal(result, expected) + + def test_merge_index_column_tz(self): + # GH 29864 + index = pd.date_range("2019-10-01", freq="30min", periods=5, tz="UTC") + left = pd.DataFrame([0.9, 0.8, 0.7, 0.6], columns=["xyz"], index=index[1:]) + right = pd.DataFrame({"from_date": index, "abc": [2.46] * 4 + [2.19]}) + result = merge_asof( + left=left, right=right, left_index=True, right_on=["from_date"] + ) + expected = pd.DataFrame( + { + "xyz": [0.9, 0.8, 0.7, 0.6], + "from_date": index[1:], + "abc": [2.46] * 3 + [2.19], + }, + index=pd.date_range( + "2019-10-01 00:30:00", freq="30min", periods=4, tz="UTC" + ), + ) + tm.assert_frame_equal(result, expected) + + result = merge_asof( + left=right, right=left, right_index=True, left_on=["from_date"] + ) + expected = pd.DataFrame( + { + "from_date": index, + "abc": [2.46] * 4 + [2.19], + "xyz": [np.nan, 0.9, 0.8, 0.7, 0.6], + }, + index=Index([0, 1, 2, 3, 4]), + ) + tm.assert_frame_equal(result, expected) + + def test_left_index_right_index_tolerance(self, unit): + # https://github.com/pandas-dev/pandas/issues/35558 + if unit == "s": + pytest.skip( + "This test is invalid with unit='s' because that would round dr1" + ) + + dr1 = pd.date_range( + start="1/1/2020", end="1/20/2020", freq="2D", unit=unit + ) + Timedelta(seconds=0.4).as_unit(unit) + dr2 = pd.date_range(start="1/1/2020", end="2/1/2020", unit=unit) + + df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1)) + df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2)) + + expected = pd.DataFrame( + {"val1": "foo", "val2": "bar"}, index=pd.DatetimeIndex(dr1) + ) + result = merge_asof( + df1, + df2, + left_index=True, + right_index=True, + tolerance=Timedelta(seconds=0.5), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] +) +@pytest.mark.parametrize( + "kwargs", [{"on": "x"}, {"left_index": True, "right_index": True}] +) +@pytest.mark.parametrize( + "data", + [["2019-06-01 00:09:12", "2019-06-01 00:10:29"], [1.0, "2019-06-01 00:10:29"]], +) +def test_merge_asof_non_numerical_dtype(kwargs, data, infer_string): + # GH#29130 + with option_context("future.infer_string", infer_string): + left = pd.DataFrame({"x": data}, index=data) + right = pd.DataFrame({"x": data}, index=data) + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, .*, both sides must have numeric dtype", + ): + merge_asof(left, right, **kwargs) + + +def test_merge_asof_non_numerical_dtype_object(): + # GH#29130 + left = pd.DataFrame({"a": ["12", "13", "15"], "left_val1": ["a", "b", "c"]}) + right = pd.DataFrame({"a": ["a", "b", "c"], "left_val": ["d", "e", "f"]}) + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, 
.*, both sides must have numeric dtype", + ): + merge_asof( + left, + right, + left_on="left_val1", + right_on="a", + left_by="a", + right_by="left_val", + ) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"right_index": True, "left_index": True}, + {"left_on": "left_time", "right_index": True}, + {"left_index": True, "right_on": "right"}, + ], +) +def test_merge_asof_index_behavior(kwargs): + # GH 33463 + index = Index([1, 5, 10], name="test") + left = pd.DataFrame({"left": ["a", "b", "c"], "left_time": [1, 4, 10]}, index=index) + right = pd.DataFrame({"right": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) + result = merge_asof(left, right, **kwargs) + + expected = pd.DataFrame( + {"left": ["a", "b", "c"], "left_time": [1, 4, 10], "right": [1, 3, 7]}, + index=index, + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_numeric_column_in_index(): + # GH#34488 + left = pd.DataFrame({"b": [10, 11, 12]}, index=Index([1, 2, 3], name="a")) + right = pd.DataFrame({"c": [20, 21, 22]}, index=Index([0, 2, 3], name="a")) + + result = merge_asof(left, right, left_on="a", right_on="a") + expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]}) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_numeric_column_in_multiindex(): + # GH#34488 + left = pd.DataFrame( + {"b": [10, 11, 12]}, + index=pd.MultiIndex.from_arrays([[1, 2, 3], ["a", "b", "c"]], names=["a", "z"]), + ) + right = pd.DataFrame( + {"c": [20, 21, 22]}, + index=pd.MultiIndex.from_arrays([[1, 2, 3], ["x", "y", "z"]], names=["a", "y"]), + ) + + result = merge_asof(left, right, left_on="a", right_on="a") + expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]}) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_numeri_column_in_index_object_dtype(): + # GH#34488 + left = pd.DataFrame({"b": [10, 11, 12]}, index=Index(["1", "2", "3"], name="a")) + right = pd.DataFrame({"c": [20, 21, 22]}, index=Index(["m", "n", "o"], name="a")) + + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, .*, both sides must have numeric dtype", + ): + merge_asof(left, right, left_on="a", right_on="a") + + left = left.reset_index().set_index(["a", "b"]) + right = right.reset_index().set_index(["a", "c"]) + + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, .*, both sides must have numeric dtype", + ): + merge_asof(left, right, left_on="a", right_on="a") + + +def test_merge_asof_array_as_on(unit): + # GH#42844 + dti = pd.DatetimeIndex( + ["2021/01/01 00:37", "2021/01/01 01:40"], dtype=f"M8[{unit}]" + ) + right = pd.DataFrame( + { + "a": [2, 6], + "ts": dti, + } + ) + ts_merge = pd.date_range( + start=pd.Timestamp("2021/01/01 00:00"), periods=3, freq="1h", unit=unit + ) + left = pd.DataFrame({"b": [4, 8, 7]}) + result = merge_asof( + left, + right, + left_on=ts_merge, + right_on="ts", + allow_exact_matches=False, + direction="backward", + ) + expected = pd.DataFrame({"b": [4, 8, 7], "a": [np.nan, 2, 6], "ts": ts_merge}) + tm.assert_frame_equal(result, expected) + + result = merge_asof( + right, + left, + left_on="ts", + right_on=ts_merge, + allow_exact_matches=False, + direction="backward", + ) + expected = pd.DataFrame( + { + "a": [2, 6], + "ts": dti, + "b": [4, 8], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_raise_for_duplicate_columns(): + # GH#50102 + left = pd.DataFrame([[1, 2, "a"]], columns=["a", "a", "left_val"]) + right = pd.DataFrame([[1, 1, 1]], columns=["a", "a", "right_val"]) + + with 
pytest.raises(ValueError, match="column label 'a'"): + merge_asof(left, right, on="a") + + with pytest.raises(ValueError, match="column label 'a'"): + merge_asof(left, right, left_on="a", right_on="right_val") + + with pytest.raises(ValueError, match="column label 'a'"): + merge_asof(left, right, left_on="left_val", right_on="a") + + +@pytest.mark.parametrize( + "dtype", + [ + "Int64", + pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow")), + pytest.param("timestamp[s][pyarrow]", marks=td.skip_if_no("pyarrow")), + ], +) +def test_merge_asof_extension_dtype(dtype): + # GH 52904 + left = pd.DataFrame( + { + "join_col": [1, 3, 5], + "left_val": [1, 2, 3], + } + ) + right = pd.DataFrame( + { + "join_col": [2, 3, 4], + "right_val": [1, 2, 3], + } + ) + left = left.astype({"join_col": dtype}) + right = right.astype({"join_col": dtype}) + result = merge_asof(left, right, on="join_col") + expected = pd.DataFrame( + { + "join_col": [1, 3, 5], + "left_val": [1, 2, 3], + "right_val": [np.nan, 2.0, 3.0], + } + ) + expected = expected.astype({"join_col": dtype}) + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no("pyarrow") +def test_merge_asof_pyarrow_td_tolerance(): + # GH 56486 + ser = pd.Series( + [datetime.datetime(2023, 1, 1)], dtype="timestamp[us, UTC][pyarrow]" + ) + df = pd.DataFrame( + { + "timestamp": ser, + "value": [1], + } + ) + result = merge_asof(df, df, on="timestamp", tolerance=Timedelta("1s")) + expected = pd.DataFrame( + { + "timestamp": ser, + "value_x": [1], + "value_y": [1], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_read_only_ndarray(): + # GH 53513 + left = pd.Series([2], index=[2], name="left") + right = pd.Series([1], index=[1], name="right") + # set to read-only + left.index.values.flags.writeable = False + right.index.values.flags.writeable = False + result = merge_asof(left, right, left_index=True, right_index=True) + expected = pd.DataFrame({"left": [2], "right": [1]}, index=[2]) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_multiby_with_categorical(): + # GH 43541 + left = pd.DataFrame( + { + "c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]), + "c2": ["x"] * 4, + "t": [1] * 4, + "v": range(4), + } + ) + right = pd.DataFrame( + { + "c1": pd.Categorical(["b", "b"], categories=["b", "a"]), + "c2": ["x"] * 2, + "t": [1, 2], + "v": range(2), + } + ) + result = merge_asof( + left, + right, + by=["c1", "c2"], + on="t", + direction="forward", + suffixes=["_left", "_right"], + ) + expected = pd.DataFrame( + { + "c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]), + "c2": ["x"] * 4, + "t": [1] * 4, + "v_left": range(4), + "v_right": [np.nan, np.nan, 0.0, 0.0], + } + ) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_cross.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_cross.py new file mode 100644 index 0000000000000000000000000000000000000000..14f9036e43fce13580916dacfd6409e4639d025e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_cross.py @@ -0,0 +1,111 @@ +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.core.reshape.merge import ( + MergeError, + merge, +) + + +@pytest.mark.parametrize( + ("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])] +) +def test_merge_cross(input_col, output_cols): + # GH#5401 + left = DataFrame({"a": [1, 
3]}) + right = DataFrame({input_col: [3, 4]}) + left_copy = left.copy() + right_copy = right.copy() + result = merge(left, right, how="cross") + expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]}) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(left, left_copy) + tm.assert_frame_equal(right, right_copy) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"left_index": True}, + {"right_index": True}, + {"on": "a"}, + {"left_on": "a"}, + {"right_on": "b"}, + ], +) +def test_merge_cross_error_reporting(kwargs): + # GH#5401 + left = DataFrame({"a": [1, 3]}) + right = DataFrame({"b": [3, 4]}) + msg = ( + "Can not pass on, right_on, left_on or set right_index=True or " + "left_index=True" + ) + with pytest.raises(MergeError, match=msg): + merge(left, right, how="cross", **kwargs) + + +def test_merge_cross_mixed_dtypes(): + # GH#5401 + left = DataFrame(["a", "b", "c"], columns=["A"]) + right = DataFrame(range(2), columns=["B"]) + result = merge(left, right, how="cross") + expected = DataFrame({"A": ["a", "a", "b", "b", "c", "c"], "B": [0, 1, 0, 1, 0, 1]}) + tm.assert_frame_equal(result, expected) + + +def test_merge_cross_more_than_one_column(): + # GH#5401 + left = DataFrame({"A": list("ab"), "B": [2, 1]}) + right = DataFrame({"C": range(2), "D": range(4, 6)}) + result = merge(left, right, how="cross") + expected = DataFrame( + { + "A": ["a", "a", "b", "b"], + "B": [2, 2, 1, 1], + "C": [0, 1, 0, 1], + "D": [4, 5, 4, 5], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_cross_null_values(nulls_fixture): + # GH#5401 + left = DataFrame({"a": [1, nulls_fixture]}) + right = DataFrame({"b": ["a", "b"], "c": [1.0, 2.0]}) + result = merge(left, right, how="cross") + expected = DataFrame( + { + "a": [1, 1, nulls_fixture, nulls_fixture], + "b": ["a", "b", "a", "b"], + "c": [1.0, 2.0, 1.0, 2.0], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_join_cross_error_reporting(): + # GH#5401 + left = DataFrame({"a": [1, 3]}) + right = DataFrame({"a": [3, 4]}) + msg = ( + "Can not pass on, right_on, left_on or set right_index=True or " + "left_index=True" + ) + with pytest.raises(MergeError, match=msg): + left.join(right, how="cross", on="a") + + +def test_merge_cross_series(): + # GH#54055 + ls = Series([1, 2, 3, 4], index=[1, 2, 3, 4], name="left") + rs = Series([3, 4, 5, 6], index=[3, 4, 5, 6], name="right") + res = merge(ls, rs, how="cross") + + expected = merge(ls.to_frame(), rs.to_frame(), how="cross") + tm.assert_frame_equal(res, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_index_as_string.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_index_as_string.py new file mode 100644 index 0000000000000000000000000000000000000000..b4271d4face4fb4e90477196cec7919845f37813 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_index_as_string.py @@ -0,0 +1,186 @@ +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm + + +@pytest.fixture +def df1(): + return DataFrame( + { + "outer": [1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4], + "inner": [1, 2, 3, 1, 2, 3, 4, 1, 2, 1, 2], + "v1": np.linspace(0, 1, 11), + } + ) + + +@pytest.fixture +def df2(): + return DataFrame( + { + "outer": [1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3], + "inner": [1, 2, 2, 3, 3, 4, 2, 3, 1, 1, 2, 3], + "v2": np.linspace(10, 11, 12), + } + ) + + +@pytest.fixture(params=[[], ["outer"], ["outer", "inner"]]) 
+def left_df(request, df1): + """Construct left test DataFrame with specified index levels + (any of 'outer' and 'inner') + """ + levels = request.param + if levels: + df1 = df1.set_index(levels) + + return df1 + + +@pytest.fixture(params=[[], ["outer"], ["outer", "inner"]]) +def right_df(request, df2): + """Construct right test DataFrame with specified index levels + (any of 'outer' and 'inner') + """ + levels = request.param + + if levels: + df2 = df2.set_index(levels) + + return df2 + + +def compute_expected(df_left, df_right, on=None, left_on=None, right_on=None, how=None): + """ + Compute the expected merge result for the test case. + + This function computes the expected result of merging two DataFrames on + a combination of their columns and index levels. It does so by + explicitly dropping/resetting their named index levels, performing a + merge on their columns, and then finally restoring the appropriate + index in the result. + + Parameters + ---------- + df_left : DataFrame + The left DataFrame (may have zero or more named index levels) + df_right : DataFrame + The right DataFrame (may have zero or more named index levels) + on : list of str + The on parameter to the merge operation + left_on : list of str + The left_on parameter to the merge operation + right_on : list of str + The right_on parameter to the merge operation + how : str + The how parameter to the merge operation + + Returns + ------- + DataFrame + The expected merge result + """ + # Handle on param if specified + if on is not None: + left_on, right_on = on, on + + # Compute input named index levels + left_levels = [n for n in df_left.index.names if n is not None] + right_levels = [n for n in df_right.index.names if n is not None] + + # Compute output named index levels + output_levels = [i for i in left_on if i in right_levels and i in left_levels] + + # Drop index levels that aren't involved in the merge + drop_left = [n for n in left_levels if n not in left_on] + if drop_left: + df_left = df_left.reset_index(drop_left, drop=True) + + drop_right = [n for n in right_levels if n not in right_on] + if drop_right: + df_right = df_right.reset_index(drop_right, drop=True) + + # Convert remaining index levels to columns + reset_left = [n for n in left_levels if n in left_on] + if reset_left: + df_left = df_left.reset_index(level=reset_left) + + reset_right = [n for n in right_levels if n in right_on] + if reset_right: + df_right = df_right.reset_index(level=reset_right) + + # Perform merge + expected = df_left.merge(df_right, left_on=left_on, right_on=right_on, how=how) + + # Restore index levels + if output_levels: + expected = expected.set_index(output_levels) + + return expected + + +@pytest.mark.parametrize( + "on,how", + [ + (["outer"], "inner"), + (["inner"], "left"), + (["outer", "inner"], "right"), + (["inner", "outer"], "outer"), + ], +) +def test_merge_indexes_and_columns_on(left_df, right_df, on, how): + # Construct expected result + expected = compute_expected(left_df, right_df, on=on, how=how) + + # Perform merge + result = left_df.merge(right_df, on=on, how=how) + tm.assert_frame_equal(result, expected, check_like=True) + + +@pytest.mark.parametrize( + "left_on,right_on,how", + [ + (["outer"], ["outer"], "inner"), + (["inner"], ["inner"], "right"), + (["outer", "inner"], ["outer", "inner"], "left"), + (["inner", "outer"], ["inner", "outer"], "outer"), + ], +) +def test_merge_indexes_and_columns_lefton_righton( + left_df, right_df, left_on, right_on, how +): + # Construct expected result + expected =
compute_expected( + left_df, right_df, left_on=left_on, right_on=right_on, how=how + ) + + # Perform merge + result = left_df.merge(right_df, left_on=left_on, right_on=right_on, how=how) + tm.assert_frame_equal(result, expected, check_like=True) + + +@pytest.mark.parametrize("left_index", ["inner", ["inner", "outer"]]) +def test_join_indexes_and_columns_on(df1, df2, left_index, join_type): + # Construct left_df + left_df = df1.set_index(left_index) + + # Construct right_df + right_df = df2.set_index(["outer", "inner"]) + + # Result + expected = ( + left_df.reset_index() + .join( + right_df, on=["outer", "inner"], how=join_type, lsuffix="_x", rsuffix="_y" + ) + .set_index(left_index) + ) + + # Perform join + result = left_df.join( + right_df, on=["outer", "inner"], how=join_type, lsuffix="_x", rsuffix="_y" + ) + + tm.assert_frame_equal(result, expected, check_like=True) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_ordered.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_ordered.py new file mode 100644 index 0000000000000000000000000000000000000000..0bd3ca3cf2c1bd8ceae38d8a04cd6d48c4c69f68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_ordered.py @@ -0,0 +1,244 @@ +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + merge_ordered, +) +import pandas._testing as tm + + +@pytest.fixture +def left(): + return DataFrame({"key": ["a", "c", "e"], "lvalue": [1, 2.0, 3]}) + + +@pytest.fixture +def right(): + return DataFrame({"key": ["b", "c", "d", "f"], "rvalue": [1, 2, 3.0, 4]}) + + +class TestMergeOrdered: + def test_basic(self, left, right): + result = merge_ordered(left, right, on="key") + expected = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "f"], + "lvalue": [1, np.nan, 2, np.nan, 3, np.nan], + "rvalue": [np.nan, 1, 2, 3, np.nan, 4], + } + ) + + tm.assert_frame_equal(result, expected) + + def test_ffill(self, left, right): + result = merge_ordered(left, right, on="key", fill_method="ffill") + expected = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "f"], + "lvalue": [1.0, 1, 2, 2, 3, 3.0], + "rvalue": [np.nan, 1, 2, 3, 3, 4], + } + ) + tm.assert_frame_equal(result, expected) + + def test_multigroup(self, left, right): + left = pd.concat([left, left], ignore_index=True) + + left["group"] = ["a"] * 3 + ["b"] * 3 + + result = merge_ordered( + left, right, on="key", left_by="group", fill_method="ffill" + ) + expected = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "f"] * 2, + "lvalue": [1.0, 1, 2, 2, 3, 3.0] * 2, + "rvalue": [np.nan, 1, 2, 3, 3, 4] * 2, + } + ) + expected["group"] = ["a"] * 6 + ["b"] * 6 + + tm.assert_frame_equal(result, expected.loc[:, result.columns]) + + result2 = merge_ordered( + right, left, on="key", right_by="group", fill_method="ffill" + ) + tm.assert_frame_equal(result, result2.loc[:, result.columns]) + + result = merge_ordered(left, right, on="key", left_by="group") + assert result["group"].notna().all() + + @pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" + ) + def test_merge_type(self, left, right): + class NotADataFrame(DataFrame): + @property + def _constructor(self): + return NotADataFrame + + nad = NotADataFrame(left) + result = nad.merge(right, on="key") + + assert isinstance(result, NotADataFrame) + + @pytest.mark.parametrize( + "df_seq, pattern", + [ + ((), "[Nn]o objects"), + ([], "[Nn]o objects"), + 
({}, "[Nn]o objects"), + ([None], "objects.*None"), + ([None, None], "objects.*None"), + ], + ) + def test_empty_sequence_concat(self, df_seq, pattern): + # GH 9157 + with pytest.raises(ValueError, match=pattern): + pd.concat(df_seq) + + @pytest.mark.parametrize( + "arg", [[DataFrame()], [None, DataFrame()], [DataFrame(), None]] + ) + def test_empty_sequence_concat_ok(self, arg): + pd.concat(arg) + + def test_doc_example(self): + left = DataFrame( + { + "group": list("aaabbb"), + "key": ["a", "c", "e", "a", "c", "e"], + "lvalue": [1, 2, 3] * 2, + } + ) + + right = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) + + result = merge_ordered(left, right, fill_method="ffill", left_by="group") + + expected = DataFrame( + { + "group": list("aaaaabbbbb"), + "key": ["a", "b", "c", "d", "e"] * 2, + "lvalue": [1, 1, 2, 2, 3] * 2, + "rvalue": [np.nan, 1, 2, 3, 3] * 2, + } + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "left, right, on, left_by, right_by, expected", + [ + ( + DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}), + DataFrame({"T": [2], "E": [1]}), + ["T"], + ["G", "H"], + None, + DataFrame( + { + "G": ["g"] * 3, + "H": ["h"] * 3, + "T": [1, 2, 3], + "E": [np.nan, 1.0, np.nan], + } + ), + ), + ( + DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}), + DataFrame({"T": [2], "E": [1]}), + "T", + ["G", "H"], + None, + DataFrame( + { + "G": ["g"] * 3, + "H": ["h"] * 3, + "T": [1, 2, 3], + "E": [np.nan, 1.0, np.nan], + } + ), + ), + ( + DataFrame({"T": [2], "E": [1]}), + DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}), + ["T"], + None, + ["G", "H"], + DataFrame( + { + "T": [1, 2, 3], + "E": [np.nan, 1.0, np.nan], + "G": ["g"] * 3, + "H": ["h"] * 3, + } + ), + ), + ], + ) + def test_list_type_by(self, left, right, on, left_by, right_by, expected): + # GH 35269 + result = merge_ordered( + left=left, + right=right, + on=on, + left_by=left_by, + right_by=right_by, + ) + + tm.assert_frame_equal(result, expected) + + def test_left_by_length_equals_to_right_shape0(self): + # GH 38166 + left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE")) + right = DataFrame([[2, 1]], columns=list("ET")) + result = merge_ordered(left, right, on="E", left_by=["G", "H"]) + expected = DataFrame( + {"G": ["g"] * 3, "H": ["h"] * 3, "E": [1, 2, 3], "T": [np.nan, 1.0, np.nan]} + ) + + tm.assert_frame_equal(result, expected) + + def test_elements_not_in_by_but_in_df(self): + # GH 38167 + left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE")) + right = DataFrame([[2, 1]], columns=list("ET")) + msg = r"\{'h'\} not found in left columns" + with pytest.raises(KeyError, match=msg): + merge_ordered(left, right, on="E", left_by=["G", "h"]) + + @pytest.mark.parametrize("invalid_method", ["linear", "carrot"]) + def test_ffill_validate_fill_method(self, left, right, invalid_method): + # GH 55884 + with pytest.raises( + ValueError, match=re.escape("fill_method must be 'ffill' or None") + ): + merge_ordered(left, right, on="key", fill_method=invalid_method) + + def test_ffill_left_merge(self): + # GH 57010 + df1 = DataFrame( + { + "key": ["a", "c", "e", "a", "c", "e"], + "lvalue": [1, 2, 3, 1, 2, 3], + "group": ["a", "a", "a", "b", "b", "b"], + } + ) + df2 = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) + result = merge_ordered( + df1, df2, fill_method="ffill", left_by="group", how="left" + ) + expected = DataFrame( + { + "key": ["a", "c", "e", "a", "c", "e"], + "lvalue": [1, 2, 3, 1, 2, 3], + "group": ["a", "a", "a", "b", "b", 
"b"], + "rvalue": [np.nan, 2.0, 2.0, np.nan, 2.0, 2.0], + } + ) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_multi.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_multi.py new file mode 100644 index 0000000000000000000000000000000000000000..402ff049884ba25e557d8c415fa9b50030d13b06 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_multi.py @@ -0,0 +1,934 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + RangeIndex, + Series, + Timestamp, + option_context, +) +import pandas._testing as tm +from pandas.core.reshape.concat import concat +from pandas.core.reshape.merge import merge + + +@pytest.fixture +def left(): + """left dataframe (not multi-indexed) for multi-index join tests""" + # a little relevant example with NAs + key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"] + key2 = ["two", "one", "three", "one", "two", "one", "two", "two", "three", "one"] + + data = np.random.default_rng(2).standard_normal(len(key1)) + return DataFrame({"key1": key1, "key2": key2, "data": data}) + + +@pytest.fixture +def right(multiindex_dataframe_random_data): + """right dataframe (multi-indexed) for multi-index join tests""" + df = multiindex_dataframe_random_data + df.index.names = ["key1", "key2"] + + df.columns = ["j_one", "j_two", "j_three"] + return df + + +@pytest.fixture +def left_multi(): + return DataFrame( + { + "Origin": ["A", "A", "B", "B", "C"], + "Destination": ["A", "B", "A", "C", "A"], + "Period": ["AM", "AM", "IP", "AM", "OP"], + "TripPurp": ["hbw", "nhb", "hbo", "nhb", "hbw"], + "Trips": [1987, 3647, 2470, 4296, 4444], + }, + columns=["Origin", "Destination", "Period", "TripPurp", "Trips"], + ).set_index(["Origin", "Destination", "Period", "TripPurp"]) + + +@pytest.fixture +def right_multi(): + return DataFrame( + { + "Origin": ["A", "A", "B", "B", "C", "C", "E"], + "Destination": ["A", "B", "A", "B", "A", "B", "F"], + "Period": ["AM", "AM", "IP", "AM", "OP", "IP", "AM"], + "LinkType": ["a", "b", "c", "b", "a", "b", "a"], + "Distance": [100, 80, 90, 80, 75, 35, 55], + }, + columns=["Origin", "Destination", "Period", "LinkType", "Distance"], + ).set_index(["Origin", "Destination", "Period", "LinkType"]) + + +@pytest.fixture +def on_cols_multi(): + return ["Origin", "Destination", "Period"] + + +class TestMergeMulti: + def test_merge_on_multikey(self, left, right, join_type): + on_cols = ["key1", "key2"] + result = left.join(right, on=on_cols, how=join_type).reset_index(drop=True) + + expected = merge(left, right.reset_index(), on=on_cols, how=join_type) + + tm.assert_frame_equal(result, expected) + + result = left.join(right, on=on_cols, how=join_type, sort=True).reset_index( + drop=True + ) + + expected = merge( + left, right.reset_index(), on=on_cols, how=join_type, sort=True + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + @pytest.mark.parametrize("sort", [True, False]) + def test_left_join_multi_index(self, sort, infer_string): + with option_context("future.infer_string", infer_string): + icols = ["1st", "2nd", "3rd"] + + def bind_cols(df): + iord = lambda a: 0 if a != a else ord(a) + f = lambda ts: ts.map(iord) - ord("a") + return f(df["1st"]) + f(df["3rd"]) * 1e2 + 
df["2nd"].fillna(0) * 10 + + def run_asserts(left, right, sort): + res = left.join(right, on=icols, how="left", sort=sort) + + assert len(left) < len(res) + 1 + assert not res["4th"].isna().any() + assert not res["5th"].isna().any() + + tm.assert_series_equal(res["4th"], -res["5th"], check_names=False) + result = bind_cols(res.iloc[:, :-2]) + tm.assert_series_equal(res["4th"], result, check_names=False) + assert result.name is None + + if sort: + tm.assert_frame_equal(res, res.sort_values(icols, kind="mergesort")) + + out = merge(left, right.reset_index(), on=icols, sort=sort, how="left") + + res.index = RangeIndex(len(res)) + tm.assert_frame_equal(out, res) + + lc = list(map(chr, np.arange(ord("a"), ord("z") + 1))) + left = DataFrame( + np.random.default_rng(2).choice(lc, (50, 2)), columns=["1st", "3rd"] + ) + # Explicit cast to float to avoid implicit cast when setting nan + left.insert( + 1, + "2nd", + np.random.default_rng(2).integers(0, 10, len(left)).astype("float"), + ) + + i = np.random.default_rng(2).permutation(len(left)) + right = left.iloc[i].copy() + + left["4th"] = bind_cols(left) + right["5th"] = -bind_cols(right) + right.set_index(icols, inplace=True) + + run_asserts(left, right, sort) + + # inject some nulls + left.loc[1::4, "1st"] = np.nan + left.loc[2::5, "2nd"] = np.nan + left.loc[3::6, "3rd"] = np.nan + left["4th"] = bind_cols(left) + + i = np.random.default_rng(2).permutation(len(left)) + right = left.iloc[i, :-1] + right["5th"] = -bind_cols(right) + right.set_index(icols, inplace=True) + + run_asserts(left, right, sort) + + @pytest.mark.parametrize("sort", [False, True]) + def test_merge_right_vs_left(self, left, right, sort): + # compare left vs right merge with multikey + on_cols = ["key1", "key2"] + merged_left_right = left.merge( + right, left_on=on_cols, right_index=True, how="left", sort=sort + ) + + merge_right_left = right.merge( + left, right_on=on_cols, left_index=True, how="right", sort=sort + ) + + # Reorder columns + merge_right_left = merge_right_left[merged_left_right.columns] + + tm.assert_frame_equal(merged_left_right, merge_right_left) + + def test_merge_multiple_cols_with_mixed_cols_index(self): + # GH29522 + s = Series( + range(6), + MultiIndex.from_product([["A", "B"], [1, 2, 3]], names=["lev1", "lev2"]), + name="Amount", + ) + df = DataFrame({"lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], "col": 0}) + result = merge(df, s.reset_index(), on=["lev1", "lev2"]) + expected = DataFrame( + { + "lev1": list("AAABBB"), + "lev2": [1, 2, 3, 1, 2, 3], + "col": [0] * 6, + "Amount": range(6), + } + ) + tm.assert_frame_equal(result, expected) + + def test_compress_group_combinations(self): + # ~ 40000000 possible unique groups + key1 = [str(i) for i in range(10000)] + key1 = np.tile(key1, 2) + key2 = key1[::-1] + + df = DataFrame( + { + "key1": key1, + "key2": key2, + "value1": np.random.default_rng(2).standard_normal(20000), + } + ) + + df2 = DataFrame( + { + "key1": key1[::2], + "key2": key2[::2], + "value2": np.random.default_rng(2).standard_normal(10000), + } + ) + + # just to hit the label compression code path + merge(df, df2, how="outer") + + def test_left_join_index_preserve_order(self): + on_cols = ["k1", "k2"] + left = DataFrame( + { + "k1": [0, 1, 2] * 8, + "k2": ["foo", "bar"] * 12, + "v": np.array(np.arange(24), dtype=np.int64), + } + ) + + index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")]) + right = DataFrame({"v2": [5, 7]}, index=index) + + result = left.join(right, on=on_cols) + + expected = left.copy() + expected["v2"] = np.nan + 
expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5 + expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7 + + tm.assert_frame_equal(result, expected) + + result.sort_values(on_cols, kind="mergesort", inplace=True) + expected = left.join(right, on=on_cols, sort=True) + + tm.assert_frame_equal(result, expected) + + # test join with multi dtypes blocks + left = DataFrame( + { + "k1": [0, 1, 2] * 8, + "k2": ["foo", "bar"] * 12, + "k3": np.array([0, 1, 2] * 8, dtype=np.float32), + "v": np.array(np.arange(24), dtype=np.int32), + } + ) + + index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")]) + right = DataFrame({"v2": [5, 7]}, index=index) + + result = left.join(right, on=on_cols) + + expected = left.copy() + expected["v2"] = np.nan + expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5 + expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7 + + tm.assert_frame_equal(result, expected) + + result = result.sort_values(on_cols, kind="mergesort") + expected = left.join(right, on=on_cols, sort=True) + + tm.assert_frame_equal(result, expected) + + def test_left_join_index_multi_match_multiindex(self): + left = DataFrame( + [ + ["X", "Y", "C", "a"], + ["W", "Y", "C", "e"], + ["V", "Q", "A", "h"], + ["V", "R", "D", "i"], + ["X", "Y", "D", "b"], + ["X", "Y", "A", "c"], + ["W", "Q", "B", "f"], + ["W", "R", "C", "g"], + ["V", "Y", "C", "j"], + ["X", "Y", "B", "d"], + ], + columns=["cola", "colb", "colc", "tag"], + index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8], + ) + + right = DataFrame( + [ + ["W", "R", "C", 0], + ["W", "Q", "B", 3], + ["W", "Q", "B", 8], + ["X", "Y", "A", 1], + ["X", "Y", "A", 4], + ["X", "Y", "B", 5], + ["X", "Y", "C", 6], + ["X", "Y", "C", 9], + ["X", "Q", "C", -6], + ["X", "R", "C", -9], + ["V", "Y", "C", 7], + ["V", "R", "D", 2], + ["V", "R", "D", -1], + ["V", "Q", "A", -3], + ], + columns=["col1", "col2", "col3", "val"], + ).set_index(["col1", "col2", "col3"]) + + result = left.join(right, on=["cola", "colb", "colc"], how="left") + + expected = DataFrame( + [ + ["X", "Y", "C", "a", 6], + ["X", "Y", "C", "a", 9], + ["W", "Y", "C", "e", np.nan], + ["V", "Q", "A", "h", -3], + ["V", "R", "D", "i", 2], + ["V", "R", "D", "i", -1], + ["X", "Y", "D", "b", np.nan], + ["X", "Y", "A", "c", 1], + ["X", "Y", "A", "c", 4], + ["W", "Q", "B", "f", 3], + ["W", "Q", "B", "f", 8], + ["W", "R", "C", "g", 0], + ["V", "Y", "C", "j", 7], + ["X", "Y", "B", "d", 5], + ], + columns=["cola", "colb", "colc", "tag", "val"], + index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8], + ) + + tm.assert_frame_equal(result, expected) + + result = left.join(right, on=["cola", "colb", "colc"], how="left", sort=True) + + expected = expected.sort_values(["cola", "colb", "colc"], kind="mergesort") + + tm.assert_frame_equal(result, expected) + + def test_left_join_index_multi_match(self): + left = DataFrame( + [["c", 0], ["b", 1], ["a", 2], ["b", 3]], + columns=["tag", "val"], + index=[2, 0, 1, 3], + ) + + right = DataFrame( + [ + ["a", "v"], + ["c", "w"], + ["c", "x"], + ["d", "y"], + ["a", "z"], + ["c", "r"], + ["e", "q"], + ["c", "s"], + ], + columns=["tag", "char"], + ).set_index("tag") + + result = left.join(right, on="tag", how="left") + + expected = DataFrame( + [ + ["c", 0, "w"], + ["c", 0, "x"], + ["c", 0, "r"], + ["c", 0, "s"], + ["b", 1, np.nan], + ["a", 2, "v"], + ["a", 2, "z"], + ["b", 3, np.nan], + ], + columns=["tag", "val", "char"], + index=[2, 2, 2, 2, 0, 1, 1, 3], + ) + + tm.assert_frame_equal(result, expected) + + result = left.join(right, on="tag", 
how="left", sort=True) + expected2 = expected.sort_values("tag", kind="mergesort") + + tm.assert_frame_equal(result, expected2) + + # GH7331 - maintain left frame order in left merge + result = merge(left, right.reset_index(), how="left", on="tag") + expected.index = RangeIndex(len(expected)) + tm.assert_frame_equal(result, expected) + + def test_left_merge_na_buglet(self): + left = DataFrame( + { + "id": list("abcde"), + "v1": np.random.default_rng(2).standard_normal(5), + "v2": np.random.default_rng(2).standard_normal(5), + "dummy": list("abcde"), + "v3": np.random.default_rng(2).standard_normal(5), + }, + columns=["id", "v1", "v2", "dummy", "v3"], + ) + right = DataFrame( + { + "id": ["a", "b", np.nan, np.nan, np.nan], + "sv3": [1.234, 5.678, np.nan, np.nan, np.nan], + } + ) + + result = merge(left, right, on="id", how="left") + + rdf = right.drop(["id"], axis=1) + expected = left.join(rdf) + tm.assert_frame_equal(result, expected) + + def test_merge_na_keys(self): + data = [ + [1950, "A", 1.5], + [1950, "B", 1.5], + [1955, "B", 1.5], + [1960, "B", np.nan], + [1970, "B", 4.0], + [1950, "C", 4.0], + [1960, "C", np.nan], + [1965, "C", 3.0], + [1970, "C", 4.0], + ] + + frame = DataFrame(data, columns=["year", "panel", "data"]) + + other_data = [ + [1960, "A", np.nan], + [1970, "A", np.nan], + [1955, "A", np.nan], + [1965, "A", np.nan], + [1965, "B", np.nan], + [1955, "C", np.nan], + ] + other = DataFrame(other_data, columns=["year", "panel", "data"]) + + result = frame.merge(other, how="outer") + + expected = frame.fillna(-999).merge(other.fillna(-999), how="outer") + expected = expected.replace(-999, np.nan) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("klass", [None, np.asarray, Series, Index]) + def test_merge_datetime_index(self, klass): + # see gh-19038 + df = DataFrame( + [1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"] + ) + df.index = pd.to_datetime(df.index) + on_vector = df.index.year + + if klass is not None: + on_vector = klass(on_vector) + + exp_years = np.array([2016, 2017, 2018], dtype=np.int32) + expected = DataFrame({"a": [1, 2, 3], "key_1": exp_years}) + + result = df.merge(df, on=["a", on_vector], how="inner") + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"key_0": exp_years, "a_x": [1, 2, 3], "a_y": [1, 2, 3]}) + + result = df.merge(df, on=[df.index.year], how="inner") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("merge_type", ["left", "right"]) + def test_merge_datetime_multi_index_empty_df(self, merge_type): + # see gh-36895 + + left = DataFrame( + data={ + "data": [1.5, 1.5], + }, + index=MultiIndex.from_tuples( + [[Timestamp("1950-01-01"), "A"], [Timestamp("1950-01-02"), "B"]], + names=["date", "panel"], + ), + ) + + right = DataFrame( + index=MultiIndex.from_tuples([], names=["date", "panel"]), columns=["state"] + ) + + expected_index = MultiIndex.from_tuples( + [[Timestamp("1950-01-01"), "A"], [Timestamp("1950-01-02"), "B"]], + names=["date", "panel"], + ) + + if merge_type == "left": + expected = DataFrame( + data={ + "data": [1.5, 1.5], + "state": np.array([np.nan, np.nan], dtype=object), + }, + index=expected_index, + ) + results_merge = left.merge(right, how="left", on=["date", "panel"]) + results_join = left.join(right, how="left") + else: + expected = DataFrame( + data={ + "state": np.array([np.nan, np.nan], dtype=object), + "data": [1.5, 1.5], + }, + index=expected_index, + ) + results_merge = right.merge(left, how="right", on=["date", "panel"]) + results_join = 
right.join(left, how="right") + + tm.assert_frame_equal(results_merge, expected) + tm.assert_frame_equal(results_join, expected) + + @pytest.fixture + def household(self): + household = DataFrame( + { + "household_id": [1, 2, 3], + "male": [0, 1, 0], + "wealth": [196087.3, 316478.7, 294750], + }, + columns=["household_id", "male", "wealth"], + ).set_index("household_id") + return household + + @pytest.fixture + def portfolio(self): + portfolio = DataFrame( + { + "household_id": [1, 2, 2, 3, 3, 3, 4], + "asset_id": [ + "nl0000301109", + "nl0000289783", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "nl0000289965", + np.nan, + ], + "name": [ + "ABN Amro", + "Robeco", + "Royal Dutch Shell", + "Royal Dutch Shell", + "AAB Eastern Europe Equity Fund", + "Postbank BioTech Fonds", + np.nan, + ], + "share": [1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0], + }, + columns=["household_id", "asset_id", "name", "share"], + ).set_index(["household_id", "asset_id"]) + return portfolio + + @pytest.fixture + def expected(self): + expected = ( + DataFrame( + { + "male": [0, 1, 1, 0, 0, 0], + "wealth": [ + 196087.3, + 316478.7, + 316478.7, + 294750.0, + 294750.0, + 294750.0, + ], + "name": [ + "ABN Amro", + "Robeco", + "Royal Dutch Shell", + "Royal Dutch Shell", + "AAB Eastern Europe Equity Fund", + "Postbank BioTech Fonds", + ], + "share": [1.00, 0.40, 0.60, 0.15, 0.60, 0.25], + "household_id": [1, 2, 2, 3, 3, 3], + "asset_id": [ + "nl0000301109", + "nl0000289783", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "nl0000289965", + ], + } + ) + .set_index(["household_id", "asset_id"]) + .reindex(columns=["male", "wealth", "name", "share"]) + ) + return expected + + def test_join_multi_levels(self, portfolio, household, expected): + portfolio = portfolio.copy() + household = household.copy() + + # GH 3662 + # merge multi-levels + result = household.join(portfolio, how="inner") + tm.assert_frame_equal(result, expected) + + def test_join_multi_levels_merge_equivalence(self, portfolio, household, expected): + portfolio = portfolio.copy() + household = household.copy() + + # equivalency + result = merge( + household.reset_index(), + portfolio.reset_index(), + on=["household_id"], + how="inner", + ).set_index(["household_id", "asset_id"]) + tm.assert_frame_equal(result, expected) + + def test_join_multi_levels_outer(self, portfolio, household, expected): + portfolio = portfolio.copy() + household = household.copy() + + result = household.join(portfolio, how="outer") + expected = concat( + [ + expected, + ( + DataFrame( + {"share": [1.00]}, + index=MultiIndex.from_tuples( + [(4, np.nan)], names=["household_id", "asset_id"] + ), + ) + ), + ], + axis=0, + sort=True, + ).reindex(columns=expected.columns) + tm.assert_frame_equal(result, expected, check_index_type=False) + + def test_join_multi_levels_invalid(self, portfolio, household): + portfolio = portfolio.copy() + household = household.copy() + + # invalid cases + household.index.name = "foo" + + with pytest.raises( + ValueError, match="cannot join with no overlapping index names" + ): + household.join(portfolio, how="inner") + + portfolio2 = portfolio.copy() + portfolio2.index.set_names(["household_id", "foo"]) + + with pytest.raises(ValueError, match="columns overlap but no suffix specified"): + portfolio2.join(portfolio, how="inner") + + def test_join_multi_levels2(self): + # some more advanced merges + # GH6360 + household = DataFrame( + { + "household_id": [1, 2, 2, 3, 3, 3, 4], + "asset_id": [ + "nl0000301109", + "nl0000301109", + "gb00b03mlx29", + 
"gb00b03mlx29", + "lu0197800237", + "nl0000289965", + np.nan, + ], + "share": [1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0], + }, + columns=["household_id", "asset_id", "share"], + ).set_index(["household_id", "asset_id"]) + + log_return = DataFrame( + { + "asset_id": [ + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "lu0197800237", + ], + "t": [233, 234, 235, 180, 181], + "log_return": [ + 0.09604978, + -0.06524096, + 0.03532373, + 0.03025441, + 0.036997, + ], + } + ).set_index(["asset_id", "t"]) + + expected = ( + DataFrame( + { + "household_id": [2, 2, 2, 3, 3, 3, 3, 3], + "asset_id": [ + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "lu0197800237", + ], + "t": [233, 234, 235, 233, 234, 235, 180, 181], + "share": [0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6], + "log_return": [ + 0.09604978, + -0.06524096, + 0.03532373, + 0.09604978, + -0.06524096, + 0.03532373, + 0.03025441, + 0.036997, + ], + } + ) + .set_index(["household_id", "asset_id", "t"]) + .reindex(columns=["share", "log_return"]) + ) + + # this is the equivalency + result = merge( + household.reset_index(), + log_return.reset_index(), + on=["asset_id"], + how="inner", + ).set_index(["household_id", "asset_id", "t"]) + tm.assert_frame_equal(result, expected) + + expected = ( + DataFrame( + { + "household_id": [2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 2, 4], + "asset_id": [ + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "lu0197800237", + "nl0000289965", + "nl0000301109", + "nl0000301109", + None, + ], + "t": [ + 233, + 234, + 235, + 233, + 234, + 235, + 180, + 181, + None, + None, + None, + None, + ], + "share": [ + 0.6, + 0.6, + 0.6, + 0.15, + 0.15, + 0.15, + 0.6, + 0.6, + 0.25, + 1.0, + 0.4, + 1.0, + ], + "log_return": [ + 0.09604978, + -0.06524096, + 0.03532373, + 0.09604978, + -0.06524096, + 0.03532373, + 0.03025441, + 0.036997, + None, + None, + None, + None, + ], + } + ) + .set_index(["household_id", "asset_id", "t"]) + .reindex(columns=["share", "log_return"]) + ) + + result = merge( + household.reset_index(), + log_return.reset_index(), + on=["asset_id"], + how="outer", + ).set_index(["household_id", "asset_id", "t"]) + + tm.assert_frame_equal(result, expected) + + +class TestJoinMultiMulti: + def test_join_multi_multi(self, left_multi, right_multi, join_type, on_cols_multi): + left_names = left_multi.index.names + right_names = right_multi.index.names + if join_type == "right": + level_order = right_names + left_names.difference(right_names) + else: + level_order = left_names + right_names.difference(left_names) + # Multi-index join tests + expected = ( + merge( + left_multi.reset_index(), + right_multi.reset_index(), + how=join_type, + on=on_cols_multi, + ) + .set_index(level_order) + .sort_index() + ) + + result = left_multi.join(right_multi, how=join_type).sort_index() + tm.assert_frame_equal(result, expected) + + def test_join_multi_empty_frames( + self, left_multi, right_multi, join_type, on_cols_multi + ): + left_multi = left_multi.drop(columns=left_multi.columns) + right_multi = right_multi.drop(columns=right_multi.columns) + + left_names = left_multi.index.names + right_names = right_multi.index.names + if join_type == "right": + level_order = right_names + left_names.difference(right_names) + else: + level_order = left_names + right_names.difference(left_names) + + expected = ( + merge( + left_multi.reset_index(), + right_multi.reset_index(), + 
how=join_type, + on=on_cols_multi, + ) + .set_index(level_order) + .sort_index() + ) + + result = left_multi.join(right_multi, how=join_type).sort_index() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("box", [None, np.asarray, Series, Index]) + def test_merge_datetime_index(self, box): + # see gh-19038 + df = DataFrame( + [1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"] + ) + df.index = pd.to_datetime(df.index) + on_vector = df.index.year + + if box is not None: + on_vector = box(on_vector) + + exp_years = np.array([2016, 2017, 2018], dtype=np.int32) + expected = DataFrame({"a": [1, 2, 3], "key_1": exp_years}) + + result = df.merge(df, on=["a", on_vector], how="inner") + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"key_0": exp_years, "a_x": [1, 2, 3], "a_y": [1, 2, 3]}) + + result = df.merge(df, on=[df.index.year], how="inner") + tm.assert_frame_equal(result, expected) + + def test_single_common_level(self): + index_left = MultiIndex.from_tuples( + [("K0", "X0"), ("K0", "X1"), ("K1", "X2")], names=["key", "X"] + ) + + left = DataFrame( + {"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=index_left + ) + + index_right = MultiIndex.from_tuples( + [("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")], names=["key", "Y"] + ) + + right = DataFrame( + {"C": ["C0", "C1", "C2", "C3"], "D": ["D0", "D1", "D2", "D3"]}, + index=index_right, + ) + + result = left.join(right) + expected = merge( + left.reset_index(), right.reset_index(), on=["key"], how="inner" + ).set_index(["key", "X", "Y"]) + + tm.assert_frame_equal(result, expected) + + def test_join_multi_wrong_order(self): + # GH 25760 + # GH 28956 + + midx1 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) + midx3 = MultiIndex.from_tuples([(4, 1), (3, 2), (3, 1)], names=["b", "a"]) + + left = DataFrame(index=midx1, data={"x": [10, 20, 30, 40]}) + right = DataFrame(index=midx3, data={"y": ["foo", "bar", "fing"]}) + + result = left.join(right) + + expected = DataFrame( + index=midx1, + data={"x": [10, 20, 30, 40], "y": ["fing", "foo", "bar", np.nan]}, + ) + + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_crosstab.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_crosstab.py new file mode 100644 index 0000000000000000000000000000000000000000..136e76986df9d86afbf5a6ca8e3650d7745dae3a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_crosstab.py @@ -0,0 +1,886 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalDtype, + CategoricalIndex, + DataFrame, + Index, + MultiIndex, + Series, + crosstab, +) +import pandas._testing as tm + + +@pytest.fixture +def df(): + df = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": np.random.default_rng(2).standard_normal(11), + } + ) + + return pd.concat([df, df], ignore_index=True) + + +class TestCrosstab: + def test_crosstab_single(self, df): + result = crosstab(df["A"], df["C"]) + expected = df.groupby(["A", 
"C"]).size().unstack() + tm.assert_frame_equal(result, expected.fillna(0).astype(np.int64)) + + def test_crosstab_multiple(self, df): + result = crosstab(df["A"], [df["B"], df["C"]]) + expected = df.groupby(["A", "B", "C"]).size() + expected = expected.unstack("B").unstack("C").fillna(0).astype(np.int64) + tm.assert_frame_equal(result, expected) + + result = crosstab([df["B"], df["C"]], df["A"]) + expected = df.groupby(["B", "C", "A"]).size() + expected = expected.unstack("A").fillna(0).astype(np.int64) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("box", [np.array, list, tuple]) + def test_crosstab_ndarray(self, box): + # GH 44076 + a = box(np.random.default_rng(2).integers(0, 5, size=100)) + b = box(np.random.default_rng(2).integers(0, 3, size=100)) + c = box(np.random.default_rng(2).integers(0, 10, size=100)) + + df = DataFrame({"a": a, "b": b, "c": c}) + + result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c")) + expected = crosstab(df["a"], [df["b"], df["c"]]) + tm.assert_frame_equal(result, expected) + + result = crosstab([b, c], a, colnames=["a"], rownames=("b", "c")) + expected = crosstab([df["b"], df["c"]], df["a"]) + tm.assert_frame_equal(result, expected) + + # assign arbitrary names + result = crosstab(a, c) + expected = crosstab(df["a"], df["c"]) + expected.index.names = ["row_0"] + expected.columns.names = ["col_0"] + tm.assert_frame_equal(result, expected) + + def test_crosstab_non_aligned(self): + # GH 17005 + a = Series([0, 1, 1], index=["a", "b", "c"]) + b = Series([3, 4, 3, 4, 3], index=["a", "b", "c", "d", "f"]) + c = np.array([3, 4, 3], dtype=np.int64) + + expected = DataFrame( + [[1, 0], [1, 1]], + index=Index([0, 1], name="row_0"), + columns=Index([3, 4], name="col_0"), + ) + + result = crosstab(a, b) + tm.assert_frame_equal(result, expected) + + result = crosstab(a, c) + tm.assert_frame_equal(result, expected) + + def test_crosstab_margins(self): + a = np.random.default_rng(2).integers(0, 7, size=100) + b = np.random.default_rng(2).integers(0, 3, size=100) + c = np.random.default_rng(2).integers(0, 5, size=100) + + df = DataFrame({"a": a, "b": b, "c": c}) + + result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"), margins=True) + + assert result.index.names == ("a",) + assert result.columns.names == ["b", "c"] + + all_cols = result["All", ""] + exp_cols = df.groupby(["a"]).size().astype("i8") + # to keep index.name + exp_margin = Series([len(df)], index=Index(["All"], name="a")) + exp_cols = pd.concat([exp_cols, exp_margin]) + exp_cols.name = ("All", "") + + tm.assert_series_equal(all_cols, exp_cols) + + all_rows = result.loc["All"] + exp_rows = df.groupby(["b", "c"]).size().astype("i8") + exp_rows = pd.concat([exp_rows, Series([len(df)], index=[("All", "")])]) + exp_rows.name = "All" + + exp_rows = exp_rows.reindex(all_rows.index) + exp_rows = exp_rows.fillna(0).astype(np.int64) + tm.assert_series_equal(all_rows, exp_rows) + + def test_crosstab_margins_set_margin_name(self): + # GH 15972 + a = np.random.default_rng(2).integers(0, 7, size=100) + b = np.random.default_rng(2).integers(0, 3, size=100) + c = np.random.default_rng(2).integers(0, 5, size=100) + + df = DataFrame({"a": a, "b": b, "c": c}) + + result = crosstab( + a, + [b, c], + rownames=["a"], + colnames=("b", "c"), + margins=True, + margins_name="TOTAL", + ) + + assert result.index.names == ("a",) + assert result.columns.names == ["b", "c"] + + all_cols = result["TOTAL", ""] + exp_cols = df.groupby(["a"]).size().astype("i8") + # to keep index.name + exp_margin = 
Series([len(df)], index=Index(["TOTAL"], name="a")) + exp_cols = pd.concat([exp_cols, exp_margin]) + exp_cols.name = ("TOTAL", "") + + tm.assert_series_equal(all_cols, exp_cols) + + all_rows = result.loc["TOTAL"] + exp_rows = df.groupby(["b", "c"]).size().astype("i8") + exp_rows = pd.concat([exp_rows, Series([len(df)], index=[("TOTAL", "")])]) + exp_rows.name = "TOTAL" + + exp_rows = exp_rows.reindex(all_rows.index) + exp_rows = exp_rows.fillna(0).astype(np.int64) + tm.assert_series_equal(all_rows, exp_rows) + + msg = "margins_name argument must be a string" + for margins_name in [666, None, ["a", "b"]]: + with pytest.raises(ValueError, match=msg): + crosstab( + a, + [b, c], + rownames=["a"], + colnames=("b", "c"), + margins=True, + margins_name=margins_name, + ) + + def test_crosstab_pass_values(self): + a = np.random.default_rng(2).integers(0, 7, size=100) + b = np.random.default_rng(2).integers(0, 3, size=100) + c = np.random.default_rng(2).integers(0, 5, size=100) + values = np.random.default_rng(2).standard_normal(100) + + table = crosstab( + [a, b], c, values, aggfunc="sum", rownames=["foo", "bar"], colnames=["baz"] + ) + + df = DataFrame({"foo": a, "bar": b, "baz": c, "values": values}) + + expected = df.pivot_table( + "values", index=["foo", "bar"], columns="baz", aggfunc="sum" + ) + tm.assert_frame_equal(table, expected) + + def test_crosstab_dropna(self): + # GH 3820 + a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object) + b = np.array(["one", "one", "two", "one", "two", "two", "two"], dtype=object) + c = np.array( + ["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object + ) + res = crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"], dropna=False) + m = MultiIndex.from_tuples( + [("one", "dull"), ("one", "shiny"), ("two", "dull"), ("two", "shiny")], + names=["b", "c"], + ) + tm.assert_index_equal(res.columns, m) + + def test_crosstab_no_overlap(self): + # GS 10291 + + s1 = Series([1, 2, 3], index=[1, 2, 3]) + s2 = Series([4, 5, 6], index=[4, 5, 6]) + + actual = crosstab(s1, s2) + expected = DataFrame( + index=Index([], dtype="int64", name="row_0"), + columns=Index([], dtype="int64", name="col_0"), + ) + + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna(self): + # GH 12577 + # pivot_table counts null into margin ('All') + # when margins=true and dropna=true + + df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]}) + actual = crosstab(df.a, df.b, margins=True, dropna=True) + expected = DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]]) + expected.index = Index([1.0, 2.0, "All"], name="a") + expected.columns = Index([3, 4, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna2(self): + df = DataFrame( + {"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]} + ) + actual = crosstab(df.a, df.b, margins=True, dropna=True) + expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]]) + expected.index = Index([1.0, 2.0, "All"], name="a") + expected.columns = Index([3.0, 4.0, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna3(self): + df = DataFrame( + {"a": [1, np.nan, np.nan, np.nan, np.nan, 2], "b": [3, 3, 4, 4, 4, 4]} + ) + actual = crosstab(df.a, df.b, margins=True, dropna=True) + expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]]) + expected.index = Index([1.0, 2.0, "All"], name="a") + expected.columns = Index([3, 4, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna4(self): + # 
GH 12642 + # _add_margins raises KeyError: Level None not found + # when margins=True and dropna=False + # GH: 10772: Keep np.nan in result with dropna=False + df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]}) + actual = crosstab(df.a, df.b, margins=True, dropna=False) + expected = DataFrame([[1, 0, 1.0], [1, 3, 4.0], [0, 1, np.nan], [2, 4, 6.0]]) + expected.index = Index([1.0, 2.0, np.nan, "All"], name="a") + expected.columns = Index([3, 4, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna5(self): + # GH: 10772: Keep np.nan in result with dropna=False + df = DataFrame( + {"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]} + ) + actual = crosstab(df.a, df.b, margins=True, dropna=False) + expected = DataFrame( + [[1, 0, 0, 1.0], [0, 1, 0, 1.0], [0, 3, 1, np.nan], [1, 4, 0, 6.0]] + ) + expected.index = Index([1.0, 2.0, np.nan, "All"], name="a") + expected.columns = Index([3.0, 4.0, np.nan, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna6(self): + # GH: 10772: Keep np.nan in result with dropna=False + a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object) + b = np.array(["one", "one", "two", "one", "two", np.nan, "two"], dtype=object) + c = np.array( + ["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object + ) + + actual = crosstab( + a, [b, c], rownames=["a"], colnames=["b", "c"], margins=True, dropna=False + ) + m = MultiIndex.from_arrays( + [ + ["one", "one", "two", "two", np.nan, np.nan, "All"], + ["dull", "shiny", "dull", "shiny", "dull", "shiny", ""], + ], + names=["b", "c"], + ) + expected = DataFrame( + [[1, 0, 1, 0, 0, 0, 2], [2, 0, 1, 1, 0, 1, 5], [3, 0, 2, 1, 0, 0, 7]], + columns=m, + ) + expected.index = Index(["bar", "foo", "All"], name="a") + tm.assert_frame_equal(actual, expected) + + actual = crosstab( + [a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=False + ) + m = MultiIndex.from_arrays( + [ + ["bar", "bar", "bar", "foo", "foo", "foo", "All"], + ["one", "two", np.nan, "one", "two", np.nan, ""], + ], + names=["a", "b"], + ) + expected = DataFrame( + [ + [1, 0, 1.0], + [1, 0, 1.0], + [0, 0, np.nan], + [2, 0, 2.0], + [1, 1, 2.0], + [0, 1, np.nan], + [5, 2, 7.0], + ], + index=m, + ) + expected.columns = Index(["dull", "shiny", "All"], name="c") + tm.assert_frame_equal(actual, expected) + + actual = crosstab( + [a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=True + ) + m = MultiIndex.from_arrays( + [["bar", "bar", "foo", "foo", "All"], ["one", "two", "one", "two", ""]], + names=["a", "b"], + ) + expected = DataFrame( + [[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 1, 6]], index=m + ) + expected.columns = Index(["dull", "shiny", "All"], name="c") + tm.assert_frame_equal(actual, expected) + + def test_crosstab_normalize(self): + # Issue 12578 + df = DataFrame( + {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]} + ) + + rindex = Index([1, 2], name="a") + cindex = Index([3, 4], name="b") + full_normal = DataFrame([[0.2, 0], [0.2, 0.6]], index=rindex, columns=cindex) + row_normal = DataFrame([[1.0, 0], [0.25, 0.75]], index=rindex, columns=cindex) + col_normal = DataFrame([[0.5, 0], [0.5, 1.0]], index=rindex, columns=cindex) + + # Check all normalize args + tm.assert_frame_equal(crosstab(df.a, df.b, normalize="all"), full_normal) + tm.assert_frame_equal(crosstab(df.a, df.b, normalize=True), full_normal) + tm.assert_frame_equal(crosstab(df.a, df.b, 
normalize="index"), row_normal) + tm.assert_frame_equal(crosstab(df.a, df.b, normalize="columns"), col_normal) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize=1), + crosstab(df.a, df.b, normalize="columns"), + ) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index") + ) + + row_normal_margins = DataFrame( + [[1.0, 0], [0.25, 0.75], [0.4, 0.6]], + index=Index([1, 2, "All"], name="a", dtype="object"), + columns=Index([3, 4], name="b", dtype="object"), + ) + col_normal_margins = DataFrame( + [[0.5, 0, 0.2], [0.5, 1.0, 0.8]], + index=Index([1, 2], name="a", dtype="object"), + columns=Index([3, 4, "All"], name="b", dtype="object"), + ) + + all_normal_margins = DataFrame( + [[0.2, 0, 0.2], [0.2, 0.6, 0.8], [0.4, 0.6, 1]], + index=Index([1, 2, "All"], name="a", dtype="object"), + columns=Index([3, 4, "All"], name="b", dtype="object"), + ) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize="index", margins=True), row_normal_margins + ) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins + ) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins + ) + + def test_crosstab_normalize_arrays(self): + # GH#12578 + df = DataFrame( + {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]} + ) + + # Test arrays + crosstab( + [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])], np.array([1, 2, 1, 2]) + ) + + # Test with aggfunc + norm_counts = DataFrame( + [[0.25, 0, 0.25], [0.25, 0.5, 0.75], [0.5, 0.5, 1]], + index=Index([1, 2, "All"], name="a", dtype="object"), + columns=Index([3, 4, "All"], name="b"), + ) + test_case = crosstab( + df.a, df.b, df.c, aggfunc="count", normalize="all", margins=True + ) + tm.assert_frame_equal(test_case, norm_counts) + + df = DataFrame( + {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [0, 4, np.nan, 3, 3]} + ) + + norm_sum = DataFrame( + [[0, 0, 0.0], [0.4, 0.6, 1], [0.4, 0.6, 1]], + index=Index([1, 2, "All"], name="a", dtype="object"), + columns=Index([3, 4, "All"], name="b", dtype="object"), + ) + msg = "using DataFrameGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + test_case = crosstab( + df.a, df.b, df.c, aggfunc=np.sum, normalize="all", margins=True + ) + tm.assert_frame_equal(test_case, norm_sum) + + def test_crosstab_with_empties(self, using_array_manager): + # Check handling of empties + df = DataFrame( + { + "a": [1, 2, 2, 2, 2], + "b": [3, 3, 4, 4, 4], + "c": [np.nan, np.nan, np.nan, np.nan, np.nan], + } + ) + + empty = DataFrame( + [[0.0, 0.0], [0.0, 0.0]], + index=Index([1, 2], name="a", dtype="int64"), + columns=Index([3, 4], name="b"), + ) + + for i in [True, "index", "columns"]: + calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=i) + tm.assert_frame_equal(empty, calculated) + + nans = DataFrame( + [[0.0, np.nan], [0.0, 0.0]], + index=Index([1, 2], name="a", dtype="int64"), + columns=Index([3, 4], name="b"), + ) + if using_array_manager: + # INFO(ArrayManager) column without NaNs can preserve int dtype + nans[3] = nans[3].astype("int64") + + calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=False) + tm.assert_frame_equal(nans, calculated) + + def test_crosstab_errors(self): + # Issue 12578 + + df = DataFrame( + {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]} + ) + + error = "values cannot be used without an aggfunc." 
+ with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, values=df.c) + + error = "aggfunc cannot be used without values" + with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, aggfunc=np.mean) + + error = "Not a valid normalize argument" + with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, normalize="42") + + with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, normalize=42) + + error = "Not a valid margins argument" + with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, normalize="all", margins=42) + + def test_crosstab_with_categorical_columns(self): + # GH 8860 + df = DataFrame( + { + "MAKE": ["Honda", "Acura", "Tesla", "Honda", "Honda", "Acura"], + "MODEL": ["Sedan", "Sedan", "Electric", "Pickup", "Sedan", "Sedan"], + } + ) + categories = ["Sedan", "Electric", "Pickup"] + df["MODEL"] = df["MODEL"].astype("category").cat.set_categories(categories) + result = crosstab(df["MAKE"], df["MODEL"]) + + expected_index = Index(["Acura", "Honda", "Tesla"], name="MAKE") + expected_columns = CategoricalIndex( + categories, categories=categories, ordered=False, name="MODEL" + ) + expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]] + expected = DataFrame( + expected_data, index=expected_index, columns=expected_columns + ) + tm.assert_frame_equal(result, expected) + + def test_crosstab_with_numpy_size(self): + # GH 4003 + df = DataFrame( + { + "A": ["one", "one", "two", "three"] * 6, + "B": ["A", "B", "C"] * 8, + "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4, + "D": np.random.default_rng(2).standard_normal(24), + "E": np.random.default_rng(2).standard_normal(24), + } + ) + result = crosstab( + index=[df["A"], df["B"]], + columns=[df["C"]], + margins=True, + aggfunc=np.size, + values=df["D"], + ) + expected_index = MultiIndex( + levels=[["All", "one", "three", "two"], ["", "A", "B", "C"]], + codes=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0], [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]], + names=["A", "B"], + ) + expected_column = Index(["bar", "foo", "All"], name="C") + expected_data = np.array( + [ + [2.0, 2.0, 4.0], + [2.0, 2.0, 4.0], + [2.0, 2.0, 4.0], + [2.0, np.nan, 2.0], + [np.nan, 2.0, 2.0], + [2.0, np.nan, 2.0], + [np.nan, 2.0, 2.0], + [2.0, np.nan, 2.0], + [np.nan, 2.0, 2.0], + [12.0, 12.0, 24.0], + ] + ) + expected = DataFrame( + expected_data, index=expected_index, columns=expected_column + ) + # aggfunc is np.size, resulting in integers + expected["All"] = expected["All"].astype("int64") + tm.assert_frame_equal(result, expected) + + def test_crosstab_duplicate_names(self): + # GH 13279 / 22529 + + s1 = Series(range(3), name="foo") + s2_foo = Series(range(1, 4), name="foo") + s2_bar = Series(range(1, 4), name="bar") + s3 = Series(range(3), name="waldo") + + # check result computed with duplicate labels against + # result computed with unique labels, then relabelled + mapper = {"bar": "foo"} + + # duplicate row, column labels + result = crosstab(s1, s2_foo) + expected = crosstab(s1, s2_bar).rename_axis(columns=mapper, axis=1) + tm.assert_frame_equal(result, expected) + + # duplicate row, unique column labels + result = crosstab([s1, s2_foo], s3) + expected = crosstab([s1, s2_bar], s3).rename_axis(index=mapper, axis=0) + tm.assert_frame_equal(result, expected) + + # unique row, duplicate column labels + result = crosstab(s3, [s1, s2_foo]) + expected = crosstab(s3, [s1, s2_bar]).rename_axis(columns=mapper, axis=1) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("names", [["a", ("b", "c")], [("a", "b"), "c"]]) + def 
test_crosstab_tuple_name(self, names): + s1 = Series(range(3), name=names[0]) + s2 = Series(range(1, 4), name=names[1]) + + mi = MultiIndex.from_arrays([range(3), range(1, 4)], names=names) + expected = Series(1, index=mi).unstack(1, fill_value=0) + + result = crosstab(s1, s2) + tm.assert_frame_equal(result, expected) + + def test_crosstab_both_tuple_names(self): + # GH 18321 + s1 = Series(range(3), name=("a", "b")) + s2 = Series(range(3), name=("c", "d")) + + expected = DataFrame( + np.eye(3, dtype="int64"), + index=Index(range(3), name=("a", "b")), + columns=Index(range(3), name=("c", "d")), + ) + result = crosstab(s1, s2) + tm.assert_frame_equal(result, expected) + + def test_crosstab_unsorted_order(self): + df = DataFrame({"b": [3, 1, 2], "a": [5, 4, 6]}, index=["C", "A", "B"]) + result = crosstab(df.index, [df.b, df.a]) + e_idx = Index(["A", "B", "C"], name="row_0") + e_columns = MultiIndex.from_tuples([(1, 4), (2, 6), (3, 5)], names=["b", "a"]) + expected = DataFrame( + [[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns + ) + tm.assert_frame_equal(result, expected) + + def test_crosstab_normalize_multiple_columns(self): + # GH 15150 + df = DataFrame( + { + "A": ["one", "one", "two", "three"] * 6, + "B": ["A", "B", "C"] * 8, + "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4, + "D": [0] * 24, + "E": [0] * 24, + } + ) + + msg = "using DataFrameGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = crosstab( + [df.A, df.B], + df.C, + values=df.D, + aggfunc=np.sum, + normalize=True, + margins=True, + ) + expected = DataFrame( + np.array([0] * 29 + [1], dtype=float).reshape(10, 3), + columns=Index(["bar", "foo", "All"], name="C"), + index=MultiIndex.from_tuples( + [ + ("one", "A"), + ("one", "B"), + ("one", "C"), + ("three", "A"), + ("three", "B"), + ("three", "C"), + ("two", "A"), + ("two", "B"), + ("two", "C"), + ("All", ""), + ], + names=["A", "B"], + ), + ) + tm.assert_frame_equal(result, expected) + + def test_margin_normalize(self): + # GH 27500 + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + # normalize on index + result = crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0 + ) + expected = DataFrame( + [[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]] + ) + expected.index = MultiIndex( + levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]], + codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + expected.columns = Index(["large", "small"], name="C") + tm.assert_frame_equal(result, expected) + + # normalize on columns + result = crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1 + ) + expected = DataFrame( + [ + [0.25, 0.2, 0.222222], + [0.25, 0.2, 0.222222], + [0.5, 0.2, 0.333333], + [0, 0.4, 0.222222], + ] + ) + expected.columns = Index(["large", "small", "Sub-Total"], name="C") + expected.index = MultiIndex( + levels=[["bar", "foo"], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=["A", "B"], + ) + tm.assert_frame_equal(result, expected) + + # normalize on both index and column + result = crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True + ) + expected = DataFrame( 
+ [ + [0.111111, 0.111111, 0.222222], + [0.111111, 0.111111, 0.222222], + [0.222222, 0.111111, 0.333333], + [0.000000, 0.222222, 0.222222], + [0.444444, 0.555555, 1], + ] + ) + expected.columns = Index(["large", "small", "Sub-Total"], name="C") + expected.index = MultiIndex( + levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]], + codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + tm.assert_frame_equal(result, expected) + + def test_margin_normalize_multiple_columns(self): + # GH 35144 + # use multiple columns with margins and normalization + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + result = crosstab( + index=df.C, + columns=[df.A, df.B], + margins=True, + margins_name="margin", + normalize=True, + ) + expected = DataFrame( + [ + [0.111111, 0.111111, 0.222222, 0.000000, 0.444444], + [0.111111, 0.111111, 0.111111, 0.222222, 0.555556], + [0.222222, 0.222222, 0.333333, 0.222222, 1.0], + ], + index=["large", "small", "margin"], + ) + expected.columns = MultiIndex( + levels=[["bar", "foo", "margin"], ["", "one", "two"]], + codes=[[0, 0, 1, 1, 2], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + expected.index.name = "C" + tm.assert_frame_equal(result, expected) + + def test_margin_support_Float(self): + # GH 50313 + # use Float64 formats and function aggfunc with margins + df = DataFrame( + {"A": [1, 2, 2, 1], "B": [3, 3, 4, 5], "C": [-1.0, 10.0, 1.0, 10.0]}, + dtype="Float64", + ) + result = crosstab( + df["A"], + df["B"], + values=df["C"], + aggfunc="sum", + margins=True, + ) + expected = DataFrame( + [ + [-1.0, pd.NA, 10.0, 9.0], + [10.0, 1.0, pd.NA, 11.0], + [9.0, 1.0, 10.0, 20.0], + ], + index=Index([1.0, 2.0, "All"], dtype="object", name="A"), + columns=Index([3.0, 4.0, 5.0, "All"], dtype="object", name="B"), + dtype="Float64", + ) + tm.assert_frame_equal(result, expected) + + def test_margin_with_ordered_categorical_column(self): + # GH 25278 + df = DataFrame( + { + "First": ["B", "B", "C", "A", "B", "C"], + "Second": ["C", "B", "B", "B", "C", "A"], + } + ) + df["First"] = df["First"].astype(CategoricalDtype(ordered=True)) + customized_categories_order = ["C", "A", "B"] + df["First"] = df["First"].cat.reorder_categories(customized_categories_order) + result = crosstab(df["First"], df["Second"], margins=True) + + expected_index = Index(["C", "A", "B", "All"], name="First") + expected_columns = Index(["A", "B", "C", "All"], name="Second") + expected_data = [[1, 1, 0, 2], [0, 1, 0, 1], [0, 1, 2, 3], [1, 3, 2, 6]] + expected = DataFrame( + expected_data, index=expected_index, columns=expected_columns + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("a_dtype", ["category", "int64"]) +@pytest.mark.parametrize("b_dtype", ["category", "int64"]) +def test_categoricals(a_dtype, b_dtype): + # https://github.com/pandas-dev/pandas/issues/37465 + g = np.random.default_rng(2) + a = Series(g.integers(0, 3, size=100)).astype(a_dtype) + b = Series(g.integers(0, 2, size=100)).astype(b_dtype) + result = crosstab(a, b, margins=True, dropna=False) + columns = Index([0, 1, "All"], dtype="object", name="col_0") + index = Index([0, 1, 2, "All"], dtype="object", name="row_0") + values = [[10, 18, 28], [23, 16, 39], [17, 16, 33], [50, 50, 100]] + expected = 
DataFrame(values, index, columns) + tm.assert_frame_equal(result, expected) + + # Verify when categorical does not have all values present + a.loc[a == 1] = 2 + a_is_cat = isinstance(a.dtype, CategoricalDtype) + assert not a_is_cat or a.value_counts().loc[1] == 0 + result = crosstab(a, b, margins=True, dropna=False) + values = [[10, 18, 28], [0, 0, 0], [40, 32, 72], [50, 50, 100]] + expected = DataFrame(values, index, columns) + if not a_is_cat: + expected = expected.loc[[0, 2, "All"]] + expected["All"] = expected["All"].astype("int64") + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_cut.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_cut.py new file mode 100644 index 0000000000000000000000000000000000000000..0811c69859c0dadf9b3b909437d9a6a16a584c24 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_cut.py @@ -0,0 +1,791 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + Interval, + IntervalIndex, + Series, + TimedeltaIndex, + Timestamp, + cut, + date_range, + interval_range, + isna, + qcut, + timedelta_range, + to_datetime, +) +import pandas._testing as tm +from pandas.api.types import CategoricalDtype +import pandas.core.reshape.tile as tmod + + +def test_simple(): + data = np.ones(5, dtype="int64") + result = cut(data, 4, labels=False) + + expected = np.array([1, 1, 1, 1, 1]) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + +@pytest.mark.parametrize("func", [list, np.array]) +def test_bins(func): + data = func([0.2, 1.4, 2.5, 6.2, 9.7, 2.1]) + result, bins = cut(data, 3, retbins=True) + + intervals = IntervalIndex.from_breaks(bins.round(3)) + intervals = intervals.take([0, 0, 0, 1, 2, 0]) + expected = Categorical(intervals, ordered=True) + + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, 6.53333333, 9.7])) + + +def test_right(): + data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) + result, bins = cut(data, 4, right=True, retbins=True) + + intervals = IntervalIndex.from_breaks(bins.round(3)) + expected = Categorical(intervals, ordered=True) + expected = expected.take([0, 0, 0, 2, 3, 0, 0]) + + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7])) + + +def test_no_right(): + data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) + result, bins = cut(data, 4, right=False, retbins=True) + + intervals = IntervalIndex.from_breaks(bins.round(3), closed="left") + intervals = intervals.take([0, 0, 0, 2, 3, 0, 1]) + expected = Categorical(intervals, ordered=True) + + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095])) + + +def test_bins_from_interval_index(): + c = cut(range(5), 3) + expected = c + result = cut(range(5), bins=expected.categories) + tm.assert_categorical_equal(result, expected) + + expected = Categorical.from_codes( + np.append(c.codes, -1), categories=c.categories, ordered=True + ) + result = cut(range(6), bins=expected.categories) + tm.assert_categorical_equal(result, expected) + + +def test_bins_from_interval_index_doc_example(): + # Make sure we preserve the bins. 
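+ # NOTE (illustrative restatement of the example below, no new assumptions): + # c = cut(ages, bins=[0, 18, 35, 70]) yields interval categories, and + # cut([25, 20, 50], bins=c.categories) reuses that exact IntervalIndex, + # binning new data consistently with the original cut.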
+ ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60]) + c = cut(ages, bins=[0, 18, 35, 70]) + expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)]) + tm.assert_index_equal(c.categories, expected) + + result = cut([25, 20, 50], bins=c.categories) + tm.assert_index_equal(result.categories, expected) + tm.assert_numpy_array_equal(result.codes, np.array([1, 1, 2], dtype="int8")) + + +def test_bins_not_overlapping_from_interval_index(): + # see gh-23980 + msg = "Overlapping IntervalIndex is not accepted" + ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)]) + + with pytest.raises(ValueError, match=msg): + cut([5, 6], bins=ii) + + +def test_bins_not_monotonic(): + msg = "bins must increase monotonically" + data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1] + + with pytest.raises(ValueError, match=msg): + cut(data, [0.1, 1.5, 1, 10]) + + +@pytest.mark.parametrize( + "x, bins, expected", + [ + ( + date_range("2017-12-31", periods=3), + [Timestamp.min, Timestamp("2018-01-01"), Timestamp.max], + IntervalIndex.from_tuples( + [ + (Timestamp.min, Timestamp("2018-01-01")), + (Timestamp("2018-01-01"), Timestamp.max), + ] + ), + ), + ( + [-1, 0, 1], + np.array( + [np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max], dtype="int64" + ), + IntervalIndex.from_tuples( + [(np.iinfo(np.int64).min, 0), (0, np.iinfo(np.int64).max)] + ), + ), + ( + [ + np.timedelta64(-1, "ns"), + np.timedelta64(0, "ns"), + np.timedelta64(1, "ns"), + ], + np.array( + [ + np.timedelta64(-np.iinfo(np.int64).max, "ns"), + np.timedelta64(0, "ns"), + np.timedelta64(np.iinfo(np.int64).max, "ns"), + ] + ), + IntervalIndex.from_tuples( + [ + ( + np.timedelta64(-np.iinfo(np.int64).max, "ns"), + np.timedelta64(0, "ns"), + ), + ( + np.timedelta64(0, "ns"), + np.timedelta64(np.iinfo(np.int64).max, "ns"), + ), + ] + ), + ), + ], +) +def test_bins_monotonic_not_overflowing(x, bins, expected): + # GH 26045 + result = cut(x, bins) + tm.assert_index_equal(result.categories, expected) + + +def test_wrong_num_labels(): + msg = "Bin labels must be one fewer than the number of bin edges" + data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1] + + with pytest.raises(ValueError, match=msg): + cut(data, [0, 1, 10], labels=["foo", "bar", "baz"]) + + +@pytest.mark.parametrize( + "x,bins,msg", + [ + ([], 2, "Cannot cut empty array"), + ([1, 2, 3], 0.5, "`bins` should be a positive integer"), + ], +) +def test_cut_corner(x, bins, msg): + with pytest.raises(ValueError, match=msg): + cut(x, bins) + + +@pytest.mark.parametrize("arg", [2, np.eye(2), DataFrame(np.eye(2))]) +@pytest.mark.parametrize("cut_func", [cut, qcut]) +def test_cut_not_1d_arg(arg, cut_func): + msg = "Input array must be 1 dimensional" + with pytest.raises(ValueError, match=msg): + cut_func(arg, 2) + + +@pytest.mark.parametrize( + "data", + [ + [0, 1, 2, 3, 4, np.inf], + [-np.inf, 0, 1, 2, 3, 4], + [-np.inf, 0, 1, 2, 3, 4, np.inf], + ], +) +def test_int_bins_with_inf(data): + # GH 24314 + msg = "cannot specify integer `bins` when input data contains infinity" + with pytest.raises(ValueError, match=msg): + cut(data, bins=3) + + +def test_cut_out_of_range_more(): + # see gh-1511 + name = "x" + + ser = Series([0, -1, 0, 1, -3], name=name) + ind = cut(ser, [0, 1], labels=False) + + exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name=name) + tm.assert_series_equal(ind, exp) + + +@pytest.mark.parametrize( + "right,breaks,closed", + [ + (True, [-1e-3, 0.25, 0.5, 0.75, 1], "right"), + (False, [0, 0.25, 0.5, 0.75, 1 + 1e-3], "left"), + ], +) +def test_labels(right, breaks, closed): + arr = 
np.tile(np.arange(0, 1.01, 0.1), 4) + + result, bins = cut(arr, 4, retbins=True, right=right) + ex_levels = IntervalIndex.from_breaks(breaks, closed=closed) + tm.assert_index_equal(result.categories, ex_levels) + + +def test_cut_pass_series_name_to_factor(): + name = "foo" + ser = Series(np.random.default_rng(2).standard_normal(100), name=name) + + factor = cut(ser, 4) + assert factor.name == name + + +def test_label_precision(): + arr = np.arange(0, 0.73, 0.01) + result = cut(arr, 4, precision=2) + + ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72]) + tm.assert_index_equal(result.categories, ex_levels) + + +@pytest.mark.parametrize("labels", [None, False]) +def test_na_handling(labels): + arr = np.arange(0, 0.75, 0.01) + arr[::3] = np.nan + + result = cut(arr, 4, labels=labels) + result = np.asarray(result) + + expected = np.where(isna(arr), np.nan, result) + tm.assert_almost_equal(result, expected) + + +def test_inf_handling(): + data = np.arange(6) + data_ser = Series(data, dtype="int64") + + bins = [-np.inf, 2, 4, np.inf] + result = cut(data, bins) + result_ser = cut(data_ser, bins) + + ex_uniques = IntervalIndex.from_breaks(bins) + tm.assert_index_equal(result.categories, ex_uniques) + + assert result[5] == Interval(4, np.inf) + assert result[0] == Interval(-np.inf, 2) + assert result_ser[5] == Interval(4, np.inf) + assert result_ser[0] == Interval(-np.inf, 2) + + +def test_cut_out_of_bounds(): + arr = np.random.default_rng(2).standard_normal(100) + result = cut(arr, [-1, 0, 1]) + + mask = isna(result) + ex_mask = (arr < -1) | (arr > 1) + tm.assert_numpy_array_equal(mask, ex_mask) + + +@pytest.mark.parametrize( + "get_labels,get_expected", + [ + ( + lambda labels: labels, + lambda labels: Categorical( + ["Medium"] + 4 * ["Small"] + ["Medium", "Large"], + categories=labels, + ordered=True, + ), + ), + ( + lambda labels: Categorical.from_codes([0, 1, 2], labels), + lambda labels: Categorical.from_codes([1] + 4 * [0] + [1, 2], labels), + ), + ], +) +def test_cut_pass_labels(get_labels, get_expected): + bins = [0, 25, 50, 100] + arr = [50, 5, 10, 15, 20, 30, 70] + labels = ["Small", "Medium", "Large"] + + result = cut(arr, bins, labels=get_labels(labels)) + tm.assert_categorical_equal(result, get_expected(labels)) + + +def test_cut_pass_labels_compat(): + # see gh-16459 + arr = [50, 5, 10, 15, 20, 30, 70] + labels = ["Good", "Medium", "Bad"] + + result = cut(arr, 3, labels=labels) + exp = cut(arr, 3, labels=Categorical(labels, categories=labels, ordered=True)) + tm.assert_categorical_equal(result, exp) + + +@pytest.mark.parametrize("x", [np.arange(11.0), np.arange(11.0) / 1e10]) +def test_round_frac_just_works(x): + # It works. 
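+ # i.e. a smoke test: bin-edge rounding must not degenerate even for inputs + # as small as np.arange(11.0) / 1e10, so cut(x, 2) should simply not raise.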
+ cut(x, 2) + + +@pytest.mark.parametrize( + "val,precision,expected", + [ + (-117.9998, 3, -118), + (117.9998, 3, 118), + (117.9998, 2, 118), + (0.000123456, 2, 0.00012), + ], +) +def test_round_frac(val, precision, expected): + # see gh-1979 + result = tmod._round_frac(val, precision=precision) + assert result == expected + + +def test_cut_return_intervals(): + ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8]) + result = cut(ser, 3) + + exp_bins = np.linspace(0, 8, num=4).round(3) + exp_bins[0] -= 0.008 + + expected = Series( + IntervalIndex.from_breaks(exp_bins, closed="right").take( + [0, 0, 0, 1, 1, 1, 2, 2, 2] + ) + ).astype(CategoricalDtype(ordered=True)) + tm.assert_series_equal(result, expected) + + +def test_series_ret_bins(): + # see gh-8589 + ser = Series(np.arange(4)) + result, bins = cut(ser, 2, retbins=True) + + expected = Series( + IntervalIndex.from_breaks([-0.003, 1.5, 3], closed="right").repeat(2) + ).astype(CategoricalDtype(ordered=True)) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "kwargs,msg", + [ + ({"duplicates": "drop"}, None), + ({}, "Bin edges must be unique"), + ({"duplicates": "raise"}, "Bin edges must be unique"), + ({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"), + ], +) +def test_cut_duplicates_bin(kwargs, msg): + # see gh-20947 + bins = [0, 2, 4, 6, 10, 10] + values = Series(np.array([1, 3, 5, 7, 9]), index=["a", "b", "c", "d", "e"]) + + if msg is not None: + with pytest.raises(ValueError, match=msg): + cut(values, bins, **kwargs) + else: + result = cut(values, bins, **kwargs) + expected = cut(values, pd.unique(np.asarray(bins))) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("data", [9.0, -9.0, 0.0]) +@pytest.mark.parametrize("length", [1, 2]) +def test_single_bin(data, length): + # see gh-14652, gh-15428 + ser = Series([data] * length) + result = cut(ser, 1, labels=False) + + expected = Series([0] * length, dtype=np.intp) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "array_1_writeable,array_2_writeable", [(True, True), (True, False), (False, False)] +) +def test_cut_read_only(array_1_writeable, array_2_writeable): + # issue 18773 + array_1 = np.arange(0, 100, 10) + array_1.flags.writeable = array_1_writeable + + array_2 = np.arange(0, 100, 10) + array_2.flags.writeable = array_2_writeable + + hundred_elements = np.arange(100) + tm.assert_categorical_equal( + cut(hundred_elements, array_1), cut(hundred_elements, array_2) + ) + + +@pytest.mark.parametrize( + "conv", + [ + lambda v: Timestamp(v), + lambda v: to_datetime(v), + lambda v: np.datetime64(v), + lambda v: Timestamp(v).to_pydatetime(), + ], +) +def test_datetime_bin(conv): + data = [np.datetime64("2012-12-13"), np.datetime64("2012-12-15")] + bin_data = ["2012-12-12", "2012-12-14", "2012-12-16"] + + expected = Series( + IntervalIndex( + [ + Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])), + Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2])), + ] + ) + ).astype(CategoricalDtype(ordered=True)) + + bins = [conv(v) for v in bin_data] + result = Series(cut(data, bins=bins)) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("box", [Series, Index, np.array, list]) +def test_datetime_cut(unit, box): + # see gh-14714 + # + # Testing time data when it comes in various collection types. 
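+ # NOTE (editorial): "unit" is the datetime64 resolution fixture (e.g. "s", + # "ns"); plain lists get no resolution inference and fall back to + # nanoseconds, hence the unit = "ns" override for the list case below.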
+ data = to_datetime(["2013-01-01", "2013-01-02", "2013-01-03"]).astype(f"M8[{unit}]") + data = box(data) + result, _ = cut(data, 3, retbins=True) + + if box is list: + # We don't (yet) do inference on these, so get nanos + unit = "ns" + + if unit == "s": + # See https://github.com/pandas-dev/pandas/pull/56101#discussion_r1405325425 + # for why we round to 8 seconds instead of 7 + left = DatetimeIndex( + ["2012-12-31 23:57:08", "2013-01-01 16:00:00", "2013-01-02 08:00:00"], + dtype=f"M8[{unit}]", + ) + else: + left = DatetimeIndex( + [ + "2012-12-31 23:57:07.200000", + "2013-01-01 16:00:00", + "2013-01-02 08:00:00", + ], + dtype=f"M8[{unit}]", + ) + right = DatetimeIndex( + ["2013-01-01 16:00:00", "2013-01-02 08:00:00", "2013-01-03 00:00:00"], + dtype=f"M8[{unit}]", + ) + + exp_intervals = IntervalIndex.from_arrays(left, right) + expected = Series(exp_intervals).astype(CategoricalDtype(ordered=True)) + tm.assert_series_equal(Series(result), expected) + + +@pytest.mark.parametrize("box", [list, np.array, Index, Series]) +def test_datetime_tz_cut_mismatched_tzawareness(box): + # GH#54964 + bins = box( + [ + Timestamp("2013-01-01 04:57:07.200000"), + Timestamp("2013-01-01 21:00:00"), + Timestamp("2013-01-02 13:00:00"), + Timestamp("2013-01-03 05:00:00"), + ] + ) + ser = Series(date_range("20130101", periods=3, tz="US/Eastern")) + + msg = "Cannot use timezone-naive bins with timezone-aware values" + with pytest.raises(ValueError, match=msg): + cut(ser, bins) + + +@pytest.mark.parametrize( + "bins", + [ + 3, + [ + Timestamp("2013-01-01 04:57:07.200000", tz="UTC").tz_convert("US/Eastern"), + Timestamp("2013-01-01 21:00:00", tz="UTC").tz_convert("US/Eastern"), + Timestamp("2013-01-02 13:00:00", tz="UTC").tz_convert("US/Eastern"), + Timestamp("2013-01-03 05:00:00", tz="UTC").tz_convert("US/Eastern"), + ], + ], +) +@pytest.mark.parametrize("box", [list, np.array, Index, Series]) +def test_datetime_tz_cut(bins, box): + # see gh-19872 + tz = "US/Eastern" + ser = Series(date_range("20130101", periods=3, tz=tz)) + + if not isinstance(bins, int): + bins = box(bins) + + result = cut(ser, bins) + expected = Series( + IntervalIndex( + [ + Interval( + Timestamp("2012-12-31 23:57:07.200000", tz=tz), + Timestamp("2013-01-01 16:00:00", tz=tz), + ), + Interval( + Timestamp("2013-01-01 16:00:00", tz=tz), + Timestamp("2013-01-02 08:00:00", tz=tz), + ), + Interval( + Timestamp("2013-01-02 08:00:00", tz=tz), + Timestamp("2013-01-03 00:00:00", tz=tz), + ), + ] + ) + ).astype(CategoricalDtype(ordered=True)) + tm.assert_series_equal(result, expected) + + +def test_datetime_nan_error(): + msg = "bins must be of datetime64 dtype" + + with pytest.raises(ValueError, match=msg): + cut(date_range("20130101", periods=3), bins=[0, 2, 4]) + + +def test_datetime_nan_mask(): + result = cut( + date_range("20130102", periods=5), bins=date_range("20130101", periods=2) + ) + + mask = result.categories.isna() + tm.assert_numpy_array_equal(mask, np.array([False])) + + mask = result.isna() + tm.assert_numpy_array_equal(mask, np.array([False, True, True, True, True])) + + +@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"]) +def test_datetime_cut_roundtrip(tz, unit): + # see gh-19891 + ser = Series(date_range("20180101", periods=3, tz=tz, unit=unit)) + result, result_bins = cut(ser, 2, retbins=True) + + expected = cut(ser, result_bins) + tm.assert_series_equal(result, expected) + + if unit == "s": + # TODO: constructing DatetimeIndex with dtype="M8[s]" without truncating + # the first entry here raises in array_to_datetime. 
Should truncate + # instead of raising? + # See https://github.com/pandas-dev/pandas/pull/56101#discussion_r1405325425 + # for why we round to 8 seconds instead of 7 + expected_bins = DatetimeIndex( + ["2017-12-31 23:57:08", "2018-01-02 00:00:00", "2018-01-03 00:00:00"], + dtype=f"M8[{unit}]", + ) + else: + expected_bins = DatetimeIndex( + [ + "2017-12-31 23:57:07.200000", + "2018-01-02 00:00:00", + "2018-01-03 00:00:00", + ], + dtype=f"M8[{unit}]", + ) + expected_bins = expected_bins.tz_localize(tz) + tm.assert_index_equal(result_bins, expected_bins) + + +def test_timedelta_cut_roundtrip(): + # see gh-19891 + ser = Series(timedelta_range("1day", periods=3)) + result, result_bins = cut(ser, 2, retbins=True) + + expected = cut(ser, result_bins) + tm.assert_series_equal(result, expected) + + expected_bins = TimedeltaIndex( + ["0 days 23:57:07.200000", "2 days 00:00:00", "3 days 00:00:00"] + ) + tm.assert_index_equal(result_bins, expected_bins) + + +@pytest.mark.parametrize("bins", [6, 7]) +@pytest.mark.parametrize( + "box, compare", + [ + (Series, tm.assert_series_equal), + (np.array, tm.assert_categorical_equal), + (list, tm.assert_equal), + ], +) +def test_cut_bool_coercion_to_int(bins, box, compare): + # issue 20303 + data_expected = box([0, 1, 1, 0, 1] * 10) + data_result = box([False, True, True, False, True] * 10) + expected = cut(data_expected, bins, duplicates="drop") + result = cut(data_result, bins, duplicates="drop") + compare(result, expected) + + +@pytest.mark.parametrize("labels", ["foo", 1, True]) +def test_cut_incorrect_labels(labels): + # GH 13318 + values = range(5) + msg = "Bin labels must either be False, None or passed in as a list-like argument" + with pytest.raises(ValueError, match=msg): + cut(values, 4, labels=labels) + + +@pytest.mark.parametrize("bins", [3, [0, 5, 15]]) +@pytest.mark.parametrize("right", [True, False]) +@pytest.mark.parametrize("include_lowest", [True, False]) +def test_cut_nullable_integer(bins, right, include_lowest): + a = np.random.default_rng(2).integers(0, 10, size=50).astype(float) + a[::2] = np.nan + result = cut( + pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest + ) + expected = cut(a, bins, right=right, include_lowest=include_lowest) + tm.assert_categorical_equal(result, expected) + + +@pytest.mark.parametrize( + "data, bins, labels, expected_codes, expected_labels", + [ + ([15, 17, 19], [14, 16, 18, 20], ["A", "B", "A"], [0, 1, 0], ["A", "B"]), + ([1, 3, 5], [0, 2, 4, 6, 8], [2, 0, 1, 2], [2, 0, 1], [0, 1, 2]), + ], +) +def test_cut_non_unique_labels(data, bins, labels, expected_codes, expected_labels): + # GH 33141 + result = cut(data, bins=bins, labels=labels, ordered=False) + expected = Categorical.from_codes( + expected_codes, categories=expected_labels, ordered=False + ) + tm.assert_categorical_equal(result, expected) + + +@pytest.mark.parametrize( + "data, bins, labels, expected_codes, expected_labels", + [ + ([15, 17, 19], [14, 16, 18, 20], ["C", "B", "A"], [0, 1, 2], ["C", "B", "A"]), + ([1, 3, 5], [0, 2, 4, 6, 8], [3, 0, 1, 2], [0, 1, 2], [3, 0, 1, 2]), + ], +) +def test_cut_unordered_labels(data, bins, labels, expected_codes, expected_labels): + # GH 33141 + result = cut(data, bins=bins, labels=labels, ordered=False) + expected = Categorical.from_codes( + expected_codes, categories=expected_labels, ordered=False + ) + tm.assert_categorical_equal(result, expected) + + +def test_cut_unordered_with_missing_labels_raises_error(): + # GH 33141 + msg = "'labels' must be provided if 'ordered = False'" + 
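# NOTE (illustrative, not from the upstream tests): with ordered=False the + # categories come from user-supplied labels, so cut([0.5, 3], bins=[0, 1, 2], + # ordered=False) raises, while the same call with labels=["a", "b"] succeeds + # (compare the Series-labels test that follows). +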
with pytest.raises(ValueError, match=msg): + cut([0.5, 3], bins=[0, 1, 2], ordered=False) + + +def test_cut_unordered_with_series_labels(): + # https://github.com/pandas-dev/pandas/issues/36603 + ser = Series([1, 2, 3, 4, 5]) + bins = Series([0, 2, 4, 6]) + labels = Series(["a", "b", "c"]) + result = cut(ser, bins=bins, labels=labels, ordered=False) + expected = Series(["a", "a", "b", "b", "c"], dtype="category") + tm.assert_series_equal(result, expected) + + +def test_cut_no_warnings(): + df = DataFrame({"value": np.random.default_rng(2).integers(0, 100, 20)}) + labels = [f"{i} - {i + 9}" for i in range(0, 100, 10)] + with tm.assert_produces_warning(False): + df["group"] = cut(df.value, range(0, 105, 10), right=False, labels=labels) + + +def test_cut_with_duplicated_index_lowest_included(): + # GH 42185 + expected = Series( + [Interval(-0.001, 2, closed="right")] * 3 + + [Interval(2, 4, closed="right"), Interval(-0.001, 2, closed="right")], + index=[0, 1, 2, 3, 0], + dtype="category", + ).cat.as_ordered() + + ser = Series([0, 1, 2, 3, 0], index=[0, 1, 2, 3, 0]) + result = cut(ser, bins=[0, 2, 4], include_lowest=True) + tm.assert_series_equal(result, expected) + + +def test_cut_with_nonexact_categorical_indices(): + # GH 42424 + + ser = Series(range(100)) + ser1 = cut(ser, 10).value_counts().head(5) + ser2 = cut(ser, 10).value_counts().tail(5) + result = DataFrame({"1": ser1, "2": ser2}) + + index = pd.CategoricalIndex( + [ + Interval(-0.099, 9.9, closed="right"), + Interval(9.9, 19.8, closed="right"), + Interval(19.8, 29.7, closed="right"), + Interval(29.7, 39.6, closed="right"), + Interval(39.6, 49.5, closed="right"), + Interval(49.5, 59.4, closed="right"), + Interval(59.4, 69.3, closed="right"), + Interval(69.3, 79.2, closed="right"), + Interval(79.2, 89.1, closed="right"), + Interval(89.1, 99, closed="right"), + ], + ordered=True, + ) + + expected = DataFrame( + {"1": [10] * 5 + [np.nan] * 5, "2": [np.nan] * 5 + [10] * 5}, index=index + ) + + tm.assert_frame_equal(expected, result) + + +def test_cut_with_timestamp_tuple_labels(): + # GH 40661 + labels = [(Timestamp(10),), (Timestamp(20),), (Timestamp(30),)] + result = cut([2, 4, 6], bins=[1, 3, 5, 7], labels=labels) + + expected = Categorical.from_codes([0, 1, 2], labels, ordered=True) + tm.assert_categorical_equal(result, expected) + + +def test_cut_bins_datetime_intervalindex(): + # https://github.com/pandas-dev/pandas/issues/46218 + bins = interval_range(Timestamp("2022-02-25"), Timestamp("2022-02-27"), freq="1D") + # passing Series instead of list is important to trigger bug + result = cut(Series([Timestamp("2022-02-26")]).astype("M8[ns]"), bins=bins) + expected = Categorical.from_codes([0], bins, ordered=True) + tm.assert_categorical_equal(result.array, expected) + + +def test_cut_with_nullable_int64(): + # GH 30787 + series = Series([0, 1, 2, 3, 4, pd.NA, 6, 7], dtype="Int64") + bins = [0, 2, 4, 6, 8] + intervals = IntervalIndex.from_breaks(bins) + + expected = Series( + Categorical.from_codes([-1, 0, 0, 1, 1, -1, 2, 3], intervals, ordered=True) + ) + + result = cut(series, bins=bins) + + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_get_dummies.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_get_dummies.py new file mode 100644 index 0000000000000000000000000000000000000000..31260e4dcb7d2ecf5f003ce4aadf890493b59887 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_get_dummies.py @@ -0,0 +1,743 @@ 
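For orientation before the test_get_dummies.py content that the hunk header above introduces, here is a minimal sketch of the pandas.get_dummies behavior those tests exercise. It is illustrative only and not part of the patch itself; the variable names (ser, dummies, reduced, sparse_dummies) are assumptions, and the bool indicator dtype reflects pandas 2.x defaults (older releases returned uint8).

import pandas as pd

ser = pd.Series(["a", "b", "a", "c"])
# One indicator column per observed level ("a", "b", "c"); bool dtype on pandas 2.x.
dummies = pd.get_dummies(ser)
# drop_first=True drops the first level to avoid collinearity, as the
# drop_first tests below check.
reduced = pd.get_dummies(ser, drop_first=True)
# sparse=True stores the indicators as SparseArray columns (fill_value=False for bool).
sparse_dummies = pd.get_dummies(ser, sparse=True)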
+import re +import unicodedata + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer_dtype + +import pandas as pd +from pandas import ( + ArrowDtype, + Categorical, + CategoricalDtype, + CategoricalIndex, + DataFrame, + Index, + RangeIndex, + Series, + SparseDtype, + get_dummies, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + +try: + import pyarrow as pa +except ImportError: + pa = None + + +class TestGetDummies: + @pytest.fixture + def df(self): + return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]}) + + @pytest.fixture(params=["uint8", "i8", np.float64, bool, None]) + def dtype(self, request): + return np.dtype(request.param) + + @pytest.fixture(params=["dense", "sparse"]) + def sparse(self, request): + # params are strings to simplify reading test results, + # e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True] + return request.param == "sparse" + + def effective_dtype(self, dtype): + if dtype is None: + return np.uint8 + return dtype + + def test_get_dummies_raises_on_dtype_object(self, df): + msg = "dtype=object is not a valid dtype for get_dummies" + with pytest.raises(ValueError, match=msg): + get_dummies(df, dtype="object") + + def test_get_dummies_basic(self, sparse, dtype): + s_list = list("abc") + s_series = Series(s_list) + s_series_index = Series(s_list, list("ABC")) + + expected = DataFrame( + {"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]}, + dtype=self.effective_dtype(dtype), + ) + if sparse: + if dtype.kind == "b": + expected = expected.apply(SparseArray, fill_value=False) + else: + expected = expected.apply(SparseArray, fill_value=0.0) + result = get_dummies(s_list, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_series, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + expected.index = list("ABC") + result = get_dummies(s_series_index, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_basic_types(self, sparse, dtype, using_infer_string): + # GH 10531 + s_list = list("abc") + s_series = Series(s_list) + s_df = DataFrame( + {"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]} + ) + + expected = DataFrame( + {"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]}, + dtype=self.effective_dtype(dtype), + columns=list("abc"), + ) + if sparse: + if is_integer_dtype(dtype): + fill_value = 0 + elif dtype == bool: + fill_value = False + else: + fill_value = 0.0 + + expected = expected.apply(SparseArray, fill_value=fill_value) + result = get_dummies(s_list, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_series, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype) + if sparse: + dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]" + else: + dtype_name = self.effective_dtype(dtype).name + + expected = Series({dtype_name: 8}, name="count") + result = result.dtypes.value_counts() + result.index = [str(i) for i in result.index] + tm.assert_series_equal(result, expected) + + result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype) + + key = "string" if using_infer_string else "object" + expected_counts = {"int64": 1, key: 1} + expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0) + + expected = 
Series(expected_counts, name="count").sort_index() + result = result.dtypes.value_counts() + result.index = [str(i) for i in result.index] + result = result.sort_index() + tm.assert_series_equal(result, expected) + + def test_get_dummies_just_na(self, sparse): + just_na_list = [np.nan] + just_na_series = Series(just_na_list) + just_na_series_index = Series(just_na_list, index=["A"]) + + res_list = get_dummies(just_na_list, sparse=sparse) + res_series = get_dummies(just_na_series, sparse=sparse) + res_series_index = get_dummies(just_na_series_index, sparse=sparse) + + assert res_list.empty + assert res_series.empty + assert res_series_index.empty + + assert res_list.index.tolist() == [0] + assert res_series.index.tolist() == [0] + assert res_series_index.index.tolist() == ["A"] + + def test_get_dummies_include_na(self, sparse, dtype): + s = ["a", "b", np.nan] + res = get_dummies(s, sparse=sparse, dtype=dtype) + exp = DataFrame( + {"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype) + ) + if sparse: + if dtype.kind == "b": + exp = exp.apply(SparseArray, fill_value=False) + else: + exp = exp.apply(SparseArray, fill_value=0.0) + tm.assert_frame_equal(res, exp) + + # Sparse dataframes do not allow nan labelled columns, see #GH8822 + res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype) + exp_na = DataFrame( + {np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]}, + dtype=self.effective_dtype(dtype), + ) + exp_na = exp_na.reindex(["a", "b", np.nan], axis=1) + # hack (NaN handling in assert_index_equal) + exp_na.columns = res_na.columns + if sparse: + if dtype.kind == "b": + exp_na = exp_na.apply(SparseArray, fill_value=False) + else: + exp_na = exp_na.apply(SparseArray, fill_value=0.0) + tm.assert_frame_equal(res_na, exp_na) + + res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype) + exp_just_na = DataFrame( + Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype) + ) + tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values) + + def test_get_dummies_unicode(self, sparse): + # See GH 6885 - get_dummies chokes on unicode values + e = "e" + eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE") + s = [e, eacute, eacute] + res = get_dummies(s, prefix="letter", sparse=sparse) + exp = DataFrame( + {"letter_e": [True, False, False], f"letter_{eacute}": [False, True, True]} + ) + if sparse: + exp = exp.apply(SparseArray, fill_value=False) + tm.assert_frame_equal(res, exp) + + def test_dataframe_dummies_all_obj(self, df, sparse): + df = df[["A", "B"]] + result = get_dummies(df, sparse=sparse) + expected = DataFrame( + {"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]}, + dtype=bool, + ) + if sparse: + expected = DataFrame( + { + "A_a": SparseArray([1, 0, 1], dtype="bool"), + "A_b": SparseArray([0, 1, 0], dtype="bool"), + "B_b": SparseArray([1, 1, 0], dtype="bool"), + "B_c": SparseArray([0, 0, 1], dtype="bool"), + } + ) + + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_string_dtype(self, df, using_infer_string): + # GH44965 + df = df[["A", "B"]] + df = df.astype({"A": "object", "B": "string"}) + result = get_dummies(df) + expected = DataFrame( + { + "A_a": [1, 0, 1], + "A_b": [0, 1, 0], + "B_b": [1, 1, 0], + "B_c": [0, 0, 1], + }, + dtype=bool, + ) + if not using_infer_string: + # infer_string returns numpy bools + expected[["B_b", "B_c"]] = expected[["B_b", "B_c"]].astype("boolean") + tm.assert_frame_equal(result, expected) + + def 
test_dataframe_dummies_mix_default(self, df, sparse, dtype): + result = get_dummies(df, sparse=sparse, dtype=dtype) + if sparse: + arr = SparseArray + if dtype.kind == "b": + typ = SparseDtype(dtype, False) + else: + typ = SparseDtype(dtype, 0) + else: + arr = np.array + typ = dtype + expected = DataFrame( + { + "C": [1, 2, 3], + "A_a": arr([1, 0, 1], dtype=typ), + "A_b": arr([0, 1, 0], dtype=typ), + "B_b": arr([1, 1, 0], dtype=typ), + "B_c": arr([0, 0, 1], dtype=typ), + } + ) + expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]] + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_prefix_list(self, df, sparse): + prefixes = ["from_A", "from_B"] + result = get_dummies(df, prefix=prefixes, sparse=sparse) + expected = DataFrame( + { + "C": [1, 2, 3], + "from_A_a": [True, False, True], + "from_A_b": [False, True, False], + "from_B_b": [True, True, False], + "from_B_c": [False, False, True], + }, + ) + expected[["C"]] = df[["C"]] + cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"] + expected = expected[["C"] + cols] + + typ = SparseArray if sparse else Series + expected[cols] = expected[cols].apply(lambda x: typ(x)) + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_prefix_str(self, df, sparse): + # not that you should do this... + result = get_dummies(df, prefix="bad", sparse=sparse) + bad_columns = ["bad_a", "bad_b", "bad_b", "bad_c"] + expected = DataFrame( + [ + [1, True, False, True, False], + [2, False, True, True, False], + [3, True, False, False, True], + ], + columns=["C"] + bad_columns, + ) + expected = expected.astype({"C": np.int64}) + if sparse: + # work around astyping & assigning with duplicate columns + # https://github.com/pandas-dev/pandas/issues/14427 + expected = pd.concat( + [ + Series([1, 2, 3], name="C"), + Series([True, False, True], name="bad_a", dtype="Sparse[bool]"), + Series([False, True, False], name="bad_b", dtype="Sparse[bool]"), + Series([True, True, False], name="bad_b", dtype="Sparse[bool]"), + Series([False, False, True], name="bad_c", dtype="Sparse[bool]"), + ], + axis=1, + ) + + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_subset(self, df, sparse): + result = get_dummies(df, prefix=["from_A"], columns=["A"], sparse=sparse) + expected = DataFrame( + { + "B": ["b", "b", "c"], + "C": [1, 2, 3], + "from_A_a": [1, 0, 1], + "from_A_b": [0, 1, 0], + }, + ) + cols = expected.columns + expected[cols[1:]] = expected[cols[1:]].astype(bool) + expected[["C"]] = df[["C"]] + if sparse: + cols = ["from_A_a", "from_A_b"] + expected[cols] = expected[cols].astype(SparseDtype("bool", False)) + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_prefix_sep(self, df, sparse): + result = get_dummies(df, prefix_sep="..", sparse=sparse) + expected = DataFrame( + { + "C": [1, 2, 3], + "A..a": [True, False, True], + "A..b": [False, True, False], + "B..b": [True, True, False], + "B..c": [False, False, True], + }, + ) + expected[["C"]] = df[["C"]] + expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]] + if sparse: + cols = ["A..a", "A..b", "B..b", "B..c"] + expected[cols] = expected[cols].astype(SparseDtype("bool", False)) + + tm.assert_frame_equal(result, expected) + + result = get_dummies(df, prefix_sep=["..", "__"], sparse=sparse) + expected = expected.rename(columns={"B..b": "B__b", "B..c": "B__c"}) + tm.assert_frame_equal(result, expected) + + result = get_dummies(df, prefix_sep={"A": "..", "B": "__"}, sparse=sparse) + tm.assert_frame_equal(result, expected) + + def 
test_dataframe_dummies_prefix_bad_length(self, df, sparse): + msg = re.escape( + "Length of 'prefix' (1) did not match the length of the columns being " + "encoded (2)" + ) + with pytest.raises(ValueError, match=msg): + get_dummies(df, prefix=["too few"], sparse=sparse) + + def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse): + msg = re.escape( + "Length of 'prefix_sep' (1) did not match the length of the columns being " + "encoded (2)" + ) + with pytest.raises(ValueError, match=msg): + get_dummies(df, prefix_sep=["bad"], sparse=sparse) + + def test_dataframe_dummies_prefix_dict(self, sparse): + prefixes = {"A": "from_A", "B": "from_B"} + df = DataFrame({"C": [1, 2, 3], "A": ["a", "b", "a"], "B": ["b", "b", "c"]}) + result = get_dummies(df, prefix=prefixes, sparse=sparse) + + expected = DataFrame( + { + "C": [1, 2, 3], + "from_A_a": [1, 0, 1], + "from_A_b": [0, 1, 0], + "from_B_b": [1, 1, 0], + "from_B_c": [0, 0, 1], + } + ) + + columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"] + expected[columns] = expected[columns].astype(bool) + if sparse: + expected[columns] = expected[columns].astype(SparseDtype("bool", False)) + + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_with_na(self, df, sparse, dtype): + df.loc[3, :] = [np.nan, np.nan, np.nan] + result = get_dummies(df, dummy_na=True, sparse=sparse, dtype=dtype).sort_index( + axis=1 + ) + + if sparse: + arr = SparseArray + if dtype.kind == "b": + typ = SparseDtype(dtype, False) + else: + typ = SparseDtype(dtype, 0) + else: + arr = np.array + typ = dtype + + expected = DataFrame( + { + "C": [1, 2, 3, np.nan], + "A_a": arr([1, 0, 1, 0], dtype=typ), + "A_b": arr([0, 1, 0, 0], dtype=typ), + "A_nan": arr([0, 0, 0, 1], dtype=typ), + "B_b": arr([1, 1, 0, 0], dtype=typ), + "B_c": arr([0, 0, 1, 0], dtype=typ), + "B_nan": arr([0, 0, 0, 1], dtype=typ), + } + ).sort_index(axis=1) + + tm.assert_frame_equal(result, expected) + + result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype) + expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]] + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_with_categorical(self, df, sparse, dtype): + df["cat"] = Categorical(["x", "y", "y"]) + result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1) + if sparse: + arr = SparseArray + if dtype.kind == "b": + typ = SparseDtype(dtype, False) + else: + typ = SparseDtype(dtype, 0) + else: + arr = np.array + typ = dtype + + expected = DataFrame( + { + "C": [1, 2, 3], + "A_a": arr([1, 0, 1], dtype=typ), + "A_b": arr([0, 1, 0], dtype=typ), + "B_b": arr([1, 1, 0], dtype=typ), + "B_c": arr([0, 0, 1], dtype=typ), + "cat_x": arr([1, 0, 0], dtype=typ), + "cat_y": arr([0, 1, 1], dtype=typ), + } + ).sort_index(axis=1) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "get_dummies_kwargs,expected", + [ + ( + {"data": DataFrame({"ä": ["a"]})}, + DataFrame({"ä_a": [True]}), + ), + ( + {"data": DataFrame({"x": ["ä"]})}, + DataFrame({"x_ä": [True]}), + ), + ( + {"data": DataFrame({"x": ["a"]}), "prefix": "ä"}, + DataFrame({"ä_a": [True]}), + ), + ( + {"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"}, + DataFrame({"xäa": [True]}), + ), + ], + ) + def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected): + # GH22084 get_dummies incorrectly encodes unicode characters + # in dataframe column names + result = get_dummies(**get_dummies_kwargs) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_basic_drop_first(self, sparse): + # GH12402 Add a 
new parameter `drop_first` to avoid collinearity + # Basic case + s_list = list("abc") + s_series = Series(s_list) + s_series_index = Series(s_list, list("ABC")) + + expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=bool) + + result = get_dummies(s_list, drop_first=True, sparse=sparse) + if sparse: + expected = expected.apply(SparseArray, fill_value=False) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_series, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + expected.index = list("ABC") + result = get_dummies(s_series_index, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_basic_drop_first_one_level(self, sparse): + # Test the case that categorical variable only has one level. + s_list = list("aaa") + s_series = Series(s_list) + s_series_index = Series(s_list, list("ABC")) + + expected = DataFrame(index=RangeIndex(3)) + + result = get_dummies(s_list, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_series, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + expected = DataFrame(index=list("ABC")) + result = get_dummies(s_series_index, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_basic_drop_first_NA(self, sparse): + # Test NA handling together with drop_first + s_NA = ["a", "b", np.nan] + res = get_dummies(s_NA, drop_first=True, sparse=sparse) + exp = DataFrame({"b": [0, 1, 0]}, dtype=bool) + if sparse: + exp = exp.apply(SparseArray, fill_value=False) + + tm.assert_frame_equal(res, exp) + + res_na = get_dummies(s_NA, dummy_na=True, drop_first=True, sparse=sparse) + exp_na = DataFrame({"b": [0, 1, 0], np.nan: [0, 0, 1]}, dtype=bool).reindex( + ["b", np.nan], axis=1 + ) + if sparse: + exp_na = exp_na.apply(SparseArray, fill_value=False) + tm.assert_frame_equal(res_na, exp_na) + + res_just_na = get_dummies( + [np.nan], dummy_na=True, drop_first=True, sparse=sparse + ) + exp_just_na = DataFrame(index=RangeIndex(1)) + tm.assert_frame_equal(res_just_na, exp_just_na) + + def test_dataframe_dummies_drop_first(self, df, sparse): + df = df[["A", "B"]] + result = get_dummies(df, drop_first=True, sparse=sparse) + expected = DataFrame({"A_b": [0, 1, 0], "B_c": [0, 0, 1]}, dtype=bool) + if sparse: + expected = expected.apply(SparseArray, fill_value=False) + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype): + df["cat"] = Categorical(["x", "y", "y"]) + result = get_dummies(df, drop_first=True, sparse=sparse) + expected = DataFrame( + {"C": [1, 2, 3], "A_b": [0, 1, 0], "B_c": [0, 0, 1], "cat_y": [0, 1, 1]} + ) + cols = ["A_b", "B_c", "cat_y"] + expected[cols] = expected[cols].astype(bool) + expected = expected[["C", "A_b", "B_c", "cat_y"]] + if sparse: + for col in cols: + expected[col] = SparseArray(expected[col]) + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_drop_first_with_na(self, df, sparse): + df.loc[3, :] = [np.nan, np.nan, np.nan] + result = get_dummies( + df, dummy_na=True, drop_first=True, sparse=sparse + ).sort_index(axis=1) + expected = DataFrame( + { + "C": [1, 2, 3, np.nan], + "A_b": [0, 1, 0, 0], + "A_nan": [0, 0, 0, 1], + "B_c": [0, 0, 1, 0], + "B_nan": [0, 0, 0, 1], + } + ) + cols = ["A_b", "A_nan", "B_c", "B_nan"] + expected[cols] = expected[cols].astype(bool) + expected = expected.sort_index(axis=1) + if sparse: + for col in cols: + expected[col] = 
SparseArray(expected[col]) + + tm.assert_frame_equal(result, expected) + + result = get_dummies(df, dummy_na=False, drop_first=True, sparse=sparse) + expected = expected[["C", "A_b", "B_c"]] + tm.assert_frame_equal(result, expected) + + def test_get_dummies_int_int(self): + data = Series([1, 2, 1]) + result = get_dummies(data) + expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2], dtype=bool) + tm.assert_frame_equal(result, expected) + + data = Series(Categorical(["a", "b", "a"])) + result = get_dummies(data) + expected = DataFrame( + [[1, 0], [0, 1], [1, 0]], columns=Categorical(["a", "b"]), dtype=bool + ) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_int_df(self, dtype): + data = DataFrame( + { + "A": [1, 2, 1], + "B": Categorical(["a", "b", "a"]), + "C": [1, 2, 1], + "D": [1.0, 2.0, 1.0], + } + ) + columns = ["C", "D", "A_1", "A_2", "B_a", "B_b"] + expected = DataFrame( + [[1, 1.0, 1, 0, 1, 0], [2, 2.0, 0, 1, 0, 1], [1, 1.0, 1, 0, 1, 0]], + columns=columns, + ) + expected[columns[2:]] = expected[columns[2:]].astype(dtype) + result = get_dummies(data, columns=["A", "B"], dtype=dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_dataframe_dummies_preserve_categorical_dtype(self, dtype, ordered): + # GH13854 + cat = Categorical(list("xy"), categories=list("xyz"), ordered=ordered) + result = get_dummies(cat, dtype=dtype) + + data = np.array([[1, 0, 0], [0, 1, 0]], dtype=self.effective_dtype(dtype)) + cols = CategoricalIndex( + cat.categories, categories=cat.categories, ordered=ordered + ) + expected = DataFrame(data, columns=cols, dtype=self.effective_dtype(dtype)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("sparse", [True, False]) + def test_get_dummies_dont_sparsify_all_columns(self, sparse): + # GH18914 + df = DataFrame.from_dict({"GDP": [1, 2], "Nation": ["AB", "CD"]}) + df = get_dummies(df, columns=["Nation"], sparse=sparse) + df2 = df.reindex(columns=["GDP"]) + + tm.assert_frame_equal(df[["GDP"]], df2) + + def test_get_dummies_duplicate_columns(self, df): + # GH20839 + df.columns = ["A", "A", "A"] + result = get_dummies(df).sort_index(axis=1) + + expected = DataFrame( + [ + [1, True, False, True, False], + [2, False, True, True, False], + [3, True, False, False, True], + ], + columns=["A", "A_a", "A_b", "A_b", "A_c"], + ).sort_index(axis=1) + + expected = expected.astype({"A": np.int64}) + + tm.assert_frame_equal(result, expected) + + def test_get_dummies_all_sparse(self): + df = DataFrame({"A": [1, 2]}) + result = get_dummies(df, columns=["A"], sparse=True) + dtype = SparseDtype("bool", False) + expected = DataFrame( + { + "A_1": SparseArray([1, 0], dtype=dtype), + "A_2": SparseArray([0, 1], dtype=dtype), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("values", ["baz"]) + def test_get_dummies_with_string_values(self, values): + # issue #28383 + df = DataFrame( + { + "bar": [1, 2, 3, 4, 5, 6], + "foo": ["one", "one", "one", "two", "two", "two"], + "baz": ["A", "B", "C", "A", "B", "C"], + "zoo": ["x", "y", "z", "q", "w", "t"], + } + ) + + msg = "Input must be a list-like for parameter `columns`" + + with pytest.raises(TypeError, match=msg): + get_dummies(df, columns=values) + + def test_get_dummies_ea_dtype_series(self, any_numeric_ea_and_arrow_dtype): + # GH#32430 + ser = Series(list("abca")) + result = get_dummies(ser, dtype=any_numeric_ea_and_arrow_dtype) + expected = DataFrame( + {"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], "c": [0, 
0, 1, 0]}, + dtype=any_numeric_ea_and_arrow_dtype, + ) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_ea_dtype_dataframe(self, any_numeric_ea_and_arrow_dtype): + # GH#32430 + df = DataFrame({"x": list("abca")}) + result = get_dummies(df, dtype=any_numeric_ea_and_arrow_dtype) + expected = DataFrame( + {"x_a": [1, 0, 0, 1], "x_b": [0, 1, 0, 0], "x_c": [0, 0, 1, 0]}, + dtype=any_numeric_ea_and_arrow_dtype, + ) + tm.assert_frame_equal(result, expected) + + @td.skip_if_no("pyarrow") + def test_get_dummies_ea_dtype(self): + # GH#56273 + for dtype, exp_dtype in [ + ("string[pyarrow]", "boolean"), + ("string[pyarrow_numpy]", "bool"), + (CategoricalDtype(Index(["a"], dtype="string[pyarrow]")), "boolean"), + (CategoricalDtype(Index(["a"], dtype="string[pyarrow_numpy]")), "bool"), + ]: + df = DataFrame({"name": Series(["a"], dtype=dtype), "x": 1}) + result = get_dummies(df) + expected = DataFrame({"x": 1, "name_a": Series([True], dtype=exp_dtype)}) + tm.assert_frame_equal(result, expected) + + @td.skip_if_no("pyarrow") + def test_get_dummies_arrow_dtype(self): + # GH#56273 + df = DataFrame({"name": Series(["a"], dtype=ArrowDtype(pa.string())), "x": 1}) + result = get_dummies(df) + expected = DataFrame({"x": 1, "name_a": Series([True], dtype="bool[pyarrow]")}) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + { + "name": Series( + ["a"], + dtype=CategoricalDtype(Index(["a"], dtype=ArrowDtype(pa.string()))), + ), + "x": 1, + } + ) + result = get_dummies(df) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_melt.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_melt.py new file mode 100644 index 0000000000000000000000000000000000000000..272c5b34032937215b3ffff45ea10a9e31048041 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_melt.py @@ -0,0 +1,1252 @@ +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + date_range, + lreshape, + melt, + wide_to_long, +) +import pandas._testing as tm + + +@pytest.fixture +def df(): + res = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + res["id1"] = (res["A"] > 0).astype(np.int64) + res["id2"] = (res["B"] > 0).astype(np.int64) + return res + + +@pytest.fixture +def df1(): + res = DataFrame( + [ + [1.067683, -1.110463, 0.20867], + [-1.321405, 0.368915, -1.055342], + [-0.807333, 0.08298, -0.873361], + ] + ) + res.columns = [list("ABC"), list("abc")] + res.columns.names = ["CAP", "low"] + return res + + +@pytest.fixture +def var_name(): + return "var" + + +@pytest.fixture +def value_name(): + return "val" + + +class TestMelt: + def test_top_level_method(self, df): + result = melt(df) + assert result.columns.tolist() == ["variable", "value"] + + def test_method_signatures(self, df, df1, var_name, value_name): + tm.assert_frame_equal(df.melt(), melt(df)) + + tm.assert_frame_equal( + df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]), + melt(df, id_vars=["id1", "id2"], value_vars=["A", "B"]), + ) + + tm.assert_frame_equal( + df.melt(var_name=var_name, value_name=value_name), + melt(df, var_name=var_name, value_name=value_name), + ) + + tm.assert_frame_equal(df1.melt(col_level=0), melt(df1, col_level=0)) + + def test_default_col_names(self, df): + result = df.melt() + assert result.columns.tolist() == ["variable", "value"] + + 
result1 = df.melt(id_vars=["id1"]) + assert result1.columns.tolist() == ["id1", "variable", "value"] + + result2 = df.melt(id_vars=["id1", "id2"]) + assert result2.columns.tolist() == ["id1", "id2", "variable", "value"] + + def test_value_vars(self, df): + result3 = df.melt(id_vars=["id1", "id2"], value_vars="A") + assert len(result3) == 10 + + result4 = df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]) + expected4 = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + "variable": ["A"] * 10 + ["B"] * 10, + "value": (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", "variable", "value"], + ) + tm.assert_frame_equal(result4, expected4) + + @pytest.mark.parametrize("type_", (tuple, list, np.array)) + def test_value_vars_types(self, type_, df): + # GH 15348 + expected = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + "variable": ["A"] * 10 + ["B"] * 10, + "value": (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", "variable", "value"], + ) + result = df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B"))) + tm.assert_frame_equal(result, expected) + + def test_vars_work_with_multiindex(self, df1): + expected = DataFrame( + { + ("A", "a"): df1[("A", "a")], + "CAP": ["B"] * len(df1), + "low": ["b"] * len(df1), + "value": df1[("B", "b")], + }, + columns=[("A", "a"), "CAP", "low", "value"], + ) + + result = df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "id_vars, value_vars, col_level, expected", + [ + ( + ["A"], + ["B"], + 0, + DataFrame( + { + "A": {0: 1.067683, 1: -1.321405, 2: -0.807333}, + "CAP": {0: "B", 1: "B", 2: "B"}, + "value": {0: -1.110463, 1: 0.368915, 2: 0.08298}, + } + ), + ), + ( + ["a"], + ["b"], + 1, + DataFrame( + { + "a": {0: 1.067683, 1: -1.321405, 2: -0.807333}, + "low": {0: "b", 1: "b", 2: "b"}, + "value": {0: -1.110463, 1: 0.368915, 2: 0.08298}, + } + ), + ), + ], + ) + def test_single_vars_work_with_multiindex( + self, id_vars, value_vars, col_level, expected, df1 + ): + result = df1.melt(id_vars, value_vars, col_level=col_level) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "id_vars, value_vars", + [ + [("A", "a"), [("B", "b")]], + [[("A", "a")], ("B", "b")], + [("A", "a"), ("B", "b")], + ], + ) + def test_tuple_vars_fail_with_multiindex(self, id_vars, value_vars, df1): + # melt should fail with an informative error message if + # the columns have a MultiIndex and a tuple is passed + # for id_vars or value_vars. 
+ msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex" + with pytest.raises(ValueError, match=msg): + df1.melt(id_vars=id_vars, value_vars=value_vars) + + def test_custom_var_name(self, df, var_name): + result5 = df.melt(var_name=var_name) + assert result5.columns.tolist() == ["var", "value"] + + result6 = df.melt(id_vars=["id1"], var_name=var_name) + assert result6.columns.tolist() == ["id1", "var", "value"] + + result7 = df.melt(id_vars=["id1", "id2"], var_name=var_name) + assert result7.columns.tolist() == ["id1", "id2", "var", "value"] + + result8 = df.melt(id_vars=["id1", "id2"], value_vars="A", var_name=var_name) + assert result8.columns.tolist() == ["id1", "id2", "var", "value"] + + result9 = df.melt( + id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=var_name + ) + expected9 = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + var_name: ["A"] * 10 + ["B"] * 10, + "value": (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", var_name, "value"], + ) + tm.assert_frame_equal(result9, expected9) + + def test_custom_value_name(self, df, value_name): + result10 = df.melt(value_name=value_name) + assert result10.columns.tolist() == ["variable", "val"] + + result11 = df.melt(id_vars=["id1"], value_name=value_name) + assert result11.columns.tolist() == ["id1", "variable", "val"] + + result12 = df.melt(id_vars=["id1", "id2"], value_name=value_name) + assert result12.columns.tolist() == ["id1", "id2", "variable", "val"] + + result13 = df.melt( + id_vars=["id1", "id2"], value_vars="A", value_name=value_name + ) + assert result13.columns.tolist() == ["id1", "id2", "variable", "val"] + + result14 = df.melt( + id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=value_name + ) + expected14 = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + "variable": ["A"] * 10 + ["B"] * 10, + value_name: (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", "variable", value_name], + ) + tm.assert_frame_equal(result14, expected14) + + def test_custom_var_and_value_name(self, df, value_name, var_name): + result15 = df.melt(var_name=var_name, value_name=value_name) + assert result15.columns.tolist() == ["var", "val"] + + result16 = df.melt(id_vars=["id1"], var_name=var_name, value_name=value_name) + assert result16.columns.tolist() == ["id1", "var", "val"] + + result17 = df.melt( + id_vars=["id1", "id2"], var_name=var_name, value_name=value_name + ) + assert result17.columns.tolist() == ["id1", "id2", "var", "val"] + + result18 = df.melt( + id_vars=["id1", "id2"], + value_vars="A", + var_name=var_name, + value_name=value_name, + ) + assert result18.columns.tolist() == ["id1", "id2", "var", "val"] + + result19 = df.melt( + id_vars=["id1", "id2"], + value_vars=["A", "B"], + var_name=var_name, + value_name=value_name, + ) + expected19 = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + var_name: ["A"] * 10 + ["B"] * 10, + value_name: (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", var_name, value_name], + ) + tm.assert_frame_equal(result19, expected19) + + df20 = df.copy() + df20.columns.name = "foo" + result20 = df20.melt() + assert result20.columns.tolist() == ["foo", "value"] + + @pytest.mark.parametrize("col_level", [0, "CAP"]) + def test_col_level(self, col_level, df1): + res = df1.melt(col_level=col_level) + assert res.columns.tolist() == ["CAP", "value"] + + def test_multiindex(self, df1): + res = df1.melt() + assert 
res.columns.tolist() == ["CAP", "low", "value"] + + @pytest.mark.parametrize( + "col", + [ + pd.Series(date_range("2010", periods=5, tz="US/Pacific")), + pd.Series(["a", "b", "c", "a", "d"], dtype="category"), + pd.Series([0, 1, 0, 0, 0]), + ], + ) + def test_pandas_dtypes(self, col): + # GH 15785 + df = DataFrame( + {"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col} + ) + expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True) + result = melt( + df, id_vars=["klass", "col"], var_name="attribute", value_name="value" + ) + expected = DataFrame( + { + 0: list(range(5)) * 2, + 1: pd.concat([col] * 2, ignore_index=True), + 2: ["attr1"] * 5 + ["attr2"] * 5, + 3: expected_value, + } + ) + expected.columns = ["klass", "col", "attribute", "value"] + tm.assert_frame_equal(result, expected) + + def test_preserve_category(self): + # GH 15853 + data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])}) + result = melt(data, ["B"], ["A"]) + expected = DataFrame( + {"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]} + ) + + tm.assert_frame_equal(result, expected) + + def test_melt_missing_columns_raises(self): + # GH-23575 + # This test is to ensure that pandas raises an error if melting is + # attempted with column names absent from the dataframe + + # Generate data + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd") + ) + + # Try to melt with missing `value_vars` column name + msg = "The following id_vars or value_vars are not present in the DataFrame:" + with pytest.raises(KeyError, match=msg): + df.melt(["a", "b"], ["C", "d"]) + + # Try to melt with missing `id_vars` column name + with pytest.raises(KeyError, match=msg): + df.melt(["A", "b"], ["c", "d"]) + + # Multiple missing + with pytest.raises( + KeyError, + match=msg, + ): + df.melt(["a", "b", "not_here", "or_there"], ["c", "d"]) + + # Multiindex melt fails if column is missing from multilevel melt + multi = df.copy() + multi.columns = [list("ABCD"), list("abcd")] + with pytest.raises(KeyError, match=msg): + multi.melt([("E", "a")], [("B", "b")]) + # Multiindex fails if column is missing from single level melt + with pytest.raises(KeyError, match=msg): + multi.melt(["A"], ["F"], col_level=0) + + def test_melt_mixed_int_str_id_vars(self): + # GH 29718 + df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]}) + result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"]) + expected = DataFrame( + {0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]} + ) + tm.assert_frame_equal(result, expected) + + def test_melt_mixed_int_str_value_vars(self): + # GH 29718 + df = DataFrame({0: ["foo"], "a": ["bar"]}) + result = melt(df, value_vars=[0, "a"]) + expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]}) + tm.assert_frame_equal(result, expected) + + def test_ignore_index(self): + # GH 17440 + df = DataFrame({"foo": [0], "bar": [1]}, index=["first"]) + result = melt(df, ignore_index=False) + expected = DataFrame( + {"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"] + ) + tm.assert_frame_equal(result, expected) + + def test_ignore_multiindex(self): + # GH 17440 + index = pd.MultiIndex.from_tuples( + [("first", "second"), ("first", "third")], names=["baz", "foobar"] + ) + df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index) + result = melt(df, ignore_index=False) + + expected_index = pd.MultiIndex.from_tuples( + [("first", "second"), ("first", "third")] * 2, names=["baz", 
"foobar"] + ) + expected = DataFrame( + {"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]}, + index=expected_index, + ) + + tm.assert_frame_equal(result, expected) + + def test_ignore_index_name_and_type(self): + # GH 17440 + index = Index(["foo", "bar"], dtype="category", name="baz") + df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index) + result = melt(df, ignore_index=False) + + expected_index = Index(["foo", "bar"] * 2, dtype="category", name="baz") + expected = DataFrame( + {"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]}, + index=expected_index, + ) + + tm.assert_frame_equal(result, expected) + + def test_melt_with_duplicate_columns(self): + # GH#41951 + df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"]) + result = df.melt(id_vars=["a"], value_vars=["b"]) + expected = DataFrame( + [["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"] + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["Int8", "Int64"]) + def test_melt_ea_dtype(self, dtype): + # GH#41570 + df = DataFrame( + { + "a": pd.Series([1, 2], dtype="Int8"), + "b": pd.Series([3, 4], dtype=dtype), + } + ) + result = df.melt() + expected = DataFrame( + { + "variable": ["a", "a", "b", "b"], + "value": pd.Series([1, 2, 3, 4], dtype=dtype), + } + ) + tm.assert_frame_equal(result, expected) + + def test_melt_ea_columns(self): + # GH 54297 + df = DataFrame( + { + "A": {0: "a", 1: "b", 2: "c"}, + "B": {0: 1, 1: 3, 2: 5}, + "C": {0: 2, 1: 4, 2: 6}, + } + ) + df.columns = df.columns.astype("string[python]") + result = df.melt(id_vars=["A"], value_vars=["B"]) + expected = DataFrame( + { + "A": list("abc"), + "variable": pd.Series(["B"] * 3, dtype="string[python]"), + "value": [1, 3, 5], + } + ) + tm.assert_frame_equal(result, expected) + + def test_melt_preserves_datetime(self): + df = DataFrame( + data=[ + { + "type": "A0", + "start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"), + "end_date": pd.Timestamp("2023/03/10", tz="Asia/Tokyo"), + }, + { + "type": "A1", + "start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"), + "end_date": pd.Timestamp("2023/03/11", tz="Asia/Tokyo"), + }, + ], + index=["aaaa", "bbbb"], + ) + result = df.melt( + id_vars=["type"], + value_vars=["start_date", "end_date"], + var_name="start/end", + value_name="date", + ) + expected = DataFrame( + { + "type": {0: "A0", 1: "A1", 2: "A0", 3: "A1"}, + "start/end": { + 0: "start_date", + 1: "start_date", + 2: "end_date", + 3: "end_date", + }, + "date": { + 0: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"), + 1: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"), + 2: pd.Timestamp("2023-03-10 00:00:00+0900", tz="Asia/Tokyo"), + 3: pd.Timestamp("2023-03-11 00:00:00+0900", tz="Asia/Tokyo"), + }, + } + ) + tm.assert_frame_equal(result, expected) + + def test_melt_allows_non_scalar_id_vars(self): + df = DataFrame( + data={"a": [1, 2, 3], "b": [4, 5, 6]}, + index=["11", "22", "33"], + ) + result = df.melt( + id_vars="a", + var_name=0, + value_name=1, + ) + expected = DataFrame({"a": [1, 2, 3], 0: ["b"] * 3, 1: [4, 5, 6]}) + tm.assert_frame_equal(result, expected) + + def test_melt_allows_non_string_var_name(self): + df = DataFrame( + data={"a": [1, 2, 3], "b": [4, 5, 6]}, + index=["11", "22", "33"], + ) + result = df.melt( + id_vars=["a"], + var_name=0, + value_name=1, + ) + expected = DataFrame({"a": [1, 2, 3], 0: ["b"] * 3, 1: [4, 5, 6]}) + tm.assert_frame_equal(result, expected) + + def test_melt_non_scalar_var_name_raises(self): + df = DataFrame( + data={"a": [1, 2, 3], 
"b": [4, 5, 6]}, + index=["11", "22", "33"], + ) + with pytest.raises(ValueError, match=r".* must be a scalar."): + df.melt(id_vars=["a"], var_name=[1, 2]) + + +class TestLreshape: + def test_pairs(self): + data = { + "birthdt": [ + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + ], + "birthwt": [1766, 3301, 1454, 3139, 4133], + "id": [101, 102, 103, 104, 105], + "sex": ["Male", "Female", "Female", "Female", "Female"], + "visitdt1": [ + "11jan2009", + "22dec2008", + "04jan2009", + "29dec2008", + "20jan2009", + ], + "visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"], + "visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"], + "wt1": [1823, 3338, 1549, 3298, 4306], + "wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0], + "wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0], + } + + df = DataFrame(data) + + spec = { + "visitdt": [f"visitdt{i:d}" for i in range(1, 4)], + "wt": [f"wt{i:d}" for i in range(1, 4)], + } + result = lreshape(df, spec) + + exp_data = { + "birthdt": [ + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + "08jan2009", + "30dec2008", + "21dec2008", + "11jan2009", + "08jan2009", + "21dec2008", + "11jan2009", + ], + "birthwt": [ + 1766, + 3301, + 1454, + 3139, + 4133, + 1766, + 1454, + 3139, + 4133, + 1766, + 3139, + 4133, + ], + "id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105], + "sex": [ + "Male", + "Female", + "Female", + "Female", + "Female", + "Male", + "Female", + "Female", + "Female", + "Male", + "Female", + "Female", + ], + "visitdt": [ + "11jan2009", + "22dec2008", + "04jan2009", + "29dec2008", + "20jan2009", + "21jan2009", + "22jan2009", + "31dec2008", + "03feb2009", + "05feb2009", + "02jan2009", + "15feb2009", + ], + "wt": [ + 1823.0, + 3338.0, + 1549.0, + 3298.0, + 4306.0, + 2011.0, + 1892.0, + 3338.0, + 4575.0, + 2293.0, + 3377.0, + 4805.0, + ], + } + exp = DataFrame(exp_data, columns=result.columns) + tm.assert_frame_equal(result, exp) + + result = lreshape(df, spec, dropna=False) + exp_data = { + "birthdt": [ + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + ], + "birthwt": [ + 1766, + 3301, + 1454, + 3139, + 4133, + 1766, + 3301, + 1454, + 3139, + 4133, + 1766, + 3301, + 1454, + 3139, + 4133, + ], + "id": [ + 101, + 102, + 103, + 104, + 105, + 101, + 102, + 103, + 104, + 105, + 101, + 102, + 103, + 104, + 105, + ], + "sex": [ + "Male", + "Female", + "Female", + "Female", + "Female", + "Male", + "Female", + "Female", + "Female", + "Female", + "Male", + "Female", + "Female", + "Female", + "Female", + ], + "visitdt": [ + "11jan2009", + "22dec2008", + "04jan2009", + "29dec2008", + "20jan2009", + "21jan2009", + np.nan, + "22jan2009", + "31dec2008", + "03feb2009", + "05feb2009", + np.nan, + np.nan, + "02jan2009", + "15feb2009", + ], + "wt": [ + 1823.0, + 3338.0, + 1549.0, + 3298.0, + 4306.0, + 2011.0, + np.nan, + 1892.0, + 3338.0, + 4575.0, + 2293.0, + np.nan, + np.nan, + 3377.0, + 4805.0, + ], + } + exp = DataFrame(exp_data, columns=result.columns) + tm.assert_frame_equal(result, exp) + + spec = { + "visitdt": [f"visitdt{i:d}" for i in range(1, 3)], + "wt": [f"wt{i:d}" for i in range(1, 4)], + } + msg = "All column lists must be same length" + with pytest.raises(ValueError, match=msg): + lreshape(df, spec) + + +class TestWideToLong: + def test_simple(self): + x = 
np.random.default_rng(2).standard_normal(3) + df = DataFrame( + { + "A1970": {0: "a", 1: "b", 2: "c"}, + "A1980": {0: "d", 1: "e", 2: "f"}, + "B1970": {0: 2.5, 1: 1.2, 2: 0.7}, + "B1980": {0: 3.2, 1: 1.3, 2: 0.1}, + "X": dict(zip(range(3), x)), + } + ) + df["id"] = df.index + exp_data = { + "X": x.tolist() + x.tolist(), + "A": ["a", "b", "c", "d", "e", "f"], + "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], + "year": [1970, 1970, 1970, 1980, 1980, 1980], + "id": [0, 1, 2, 0, 1, 2], + } + expected = DataFrame(exp_data) + expected = expected.set_index(["id", "year"])[["X", "A", "B"]] + result = wide_to_long(df, ["A", "B"], i="id", j="year") + tm.assert_frame_equal(result, expected) + + def test_stubs(self): + # GH9204 wide_to_long call should not modify 'stubs' list + df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]]) + df.columns = ["id", "inc1", "inc2", "edu1", "edu2"] + stubs = ["inc", "edu"] + + wide_to_long(df, stubs, i="id", j="age") + + assert stubs == ["inc", "edu"] + + def test_separating_character(self): + # GH14779 + + x = np.random.default_rng(2).standard_normal(3) + df = DataFrame( + { + "A.1970": {0: "a", 1: "b", 2: "c"}, + "A.1980": {0: "d", 1: "e", 2: "f"}, + "B.1970": {0: 2.5, 1: 1.2, 2: 0.7}, + "B.1980": {0: 3.2, 1: 1.3, 2: 0.1}, + "X": dict(zip(range(3), x)), + } + ) + df["id"] = df.index + exp_data = { + "X": x.tolist() + x.tolist(), + "A": ["a", "b", "c", "d", "e", "f"], + "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], + "year": [1970, 1970, 1970, 1980, 1980, 1980], + "id": [0, 1, 2, 0, 1, 2], + } + expected = DataFrame(exp_data) + expected = expected.set_index(["id", "year"])[["X", "A", "B"]] + result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".") + tm.assert_frame_equal(result, expected) + + def test_escapable_characters(self): + x = np.random.default_rng(2).standard_normal(3) + df = DataFrame( + { + "A(quarterly)1970": {0: "a", 1: "b", 2: "c"}, + "A(quarterly)1980": {0: "d", 1: "e", 2: "f"}, + "B(quarterly)1970": {0: 2.5, 1: 1.2, 2: 0.7}, + "B(quarterly)1980": {0: 3.2, 1: 1.3, 2: 0.1}, + "X": dict(zip(range(3), x)), + } + ) + df["id"] = df.index + exp_data = { + "X": x.tolist() + x.tolist(), + "A(quarterly)": ["a", "b", "c", "d", "e", "f"], + "B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], + "year": [1970, 1970, 1970, 1980, 1980, 1980], + "id": [0, 1, 2, 0, 1, 2], + } + expected = DataFrame(exp_data) + expected = expected.set_index(["id", "year"])[ + ["X", "A(quarterly)", "B(quarterly)"] + ] + result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"], i="id", j="year") + tm.assert_frame_equal(result, expected) + + def test_unbalanced(self): + # test that we can have a varying amount of time variables + df = DataFrame( + { + "A2010": [1.0, 2.0], + "A2011": [3.0, 4.0], + "B2010": [5.0, 6.0], + "X": ["X1", "X2"], + } + ) + df["id"] = df.index + exp_data = { + "X": ["X1", "X2", "X1", "X2"], + "A": [1.0, 2.0, 3.0, 4.0], + "B": [5.0, 6.0, np.nan, np.nan], + "id": [0, 1, 0, 1], + "year": [2010, 2010, 2011, 2011], + } + expected = DataFrame(exp_data) + expected = expected.set_index(["id", "year"])[["X", "A", "B"]] + result = wide_to_long(df, ["A", "B"], i="id", j="year") + tm.assert_frame_equal(result, expected) + + def test_character_overlap(self): + # Test we handle overlapping characters in both id_vars and value_vars + df = DataFrame( + { + "A11": ["a11", "a22", "a33"], + "A12": ["a21", "a22", "a23"], + "B11": ["b11", "b12", "b13"], + "B12": ["b21", "b22", "b23"], + "BB11": [1, 2, 3], + "BB12": [4, 5, 6], + "BBBX": [91, 92, 93], + "BBBZ": [91, 92, 93], + } + ) + df["id"] = 
df.index + expected = DataFrame( + { + "BBBX": [91, 92, 93, 91, 92, 93], + "BBBZ": [91, 92, 93, 91, 92, 93], + "A": ["a11", "a22", "a33", "a21", "a22", "a23"], + "B": ["b11", "b12", "b13", "b21", "b22", "b23"], + "BB": [1, 2, 3, 4, 5, 6], + "id": [0, 1, 2, 0, 1, 2], + "year": [11, 11, 11, 12, 12, 12], + } + ) + expected = expected.set_index(["id", "year"])[["BBBX", "BBBZ", "A", "B", "BB"]] + result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year") + tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) + + def test_invalid_separator(self): + # if an invalid separator is supplied, an empty data frame is returned + sep = "nope!" + df = DataFrame( + { + "A2010": [1.0, 2.0], + "A2011": [3.0, 4.0], + "B2010": [5.0, 6.0], + "X": ["X1", "X2"], + } + ) + df["id"] = df.index + exp_data = { + "X": "", + "A2010": [], + "A2011": [], + "B2010": [], + "id": [], + "year": [], + "A": [], + "B": [], + } + expected = DataFrame(exp_data).astype({"year": np.int64}) + expected = expected.set_index(["id", "year"])[ + ["X", "A2010", "A2011", "B2010", "A", "B"] + ] + expected.index = expected.index.set_levels([0, 1], level=0) + result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep) + tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) + + def test_num_string_disambiguation(self): + # Test that we can disambiguate number value_vars from + # string value_vars + df = DataFrame( + { + "A11": ["a11", "a22", "a33"], + "A12": ["a21", "a22", "a23"], + "B11": ["b11", "b12", "b13"], + "B12": ["b21", "b22", "b23"], + "BB11": [1, 2, 3], + "BB12": [4, 5, 6], + "Arating": [91, 92, 93], + "Arating_old": [91, 92, 93], + } + ) + df["id"] = df.index + expected = DataFrame( + { + "Arating": [91, 92, 93, 91, 92, 93], + "Arating_old": [91, 92, 93, 91, 92, 93], + "A": ["a11", "a22", "a33", "a21", "a22", "a23"], + "B": ["b11", "b12", "b13", "b21", "b22", "b23"], + "BB": [1, 2, 3, 4, 5, 6], + "id": [0, 1, 2, 0, 1, 2], + "year": [11, 11, 11, 12, 12, 12], + } + ) + expected = expected.set_index(["id", "year"])[ + ["Arating", "Arating_old", "A", "B", "BB"] + ] + result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year") + tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) + + def test_invalid_suffixtype(self): + # If all stub names end with a string, but a numeric suffix is + # assumed, an empty data frame is returned + df = DataFrame( + { + "Aone": [1.0, 2.0], + "Atwo": [3.0, 4.0], + "Bone": [5.0, 6.0], + "X": ["X1", "X2"], + } + ) + df["id"] = df.index + exp_data = { + "X": "", + "Aone": [], + "Atwo": [], + "Bone": [], + "id": [], + "year": [], + "A": [], + "B": [], + } + expected = DataFrame(exp_data).astype({"year": np.int64}) + + expected = expected.set_index(["id", "year"]) + expected.index = expected.index.set_levels([0, 1], level=0) + result = wide_to_long(df, ["A", "B"], i="id", j="year") + tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) + + def test_multiple_id_columns(self): + # Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm + df = DataFrame( + { + "famid": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "birth": [1, 2, 3, 1, 2, 3, 1, 2, 3], + "ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], + "ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9], + } + ) + expected = DataFrame( + { + "ht": [ + 2.8, + 3.4, + 2.9, + 3.8, + 2.2, + 2.9, + 2.0, + 3.2, + 1.8, + 2.8, + 1.9, + 2.4, + 2.2, + 3.3, + 2.3, + 3.4, + 2.1, + 2.9, + ], + "famid": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3], + "birth": [1,
1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3], + "age": [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2], + } + ) + expected = expected.set_index(["famid", "birth", "age"])[["ht"]] + result = wide_to_long(df, "ht", i=["famid", "birth"], j="age") + tm.assert_frame_equal(result, expected) + + def test_non_unique_idvars(self): + # GH16382 + # Raise an error message if non unique id vars (i) are passed + df = DataFrame( + {"A_A1": [1, 2, 3, 4, 5], "B_B1": [1, 2, 3, 4, 5], "x": [1, 1, 1, 1, 1]} + ) + msg = "the id variables need to uniquely identify each row" + with pytest.raises(ValueError, match=msg): + wide_to_long(df, ["A_A", "B_B"], i="x", j="colname") + + def test_cast_j_int(self): + df = DataFrame( + { + "actor_1": ["CCH Pounder", "Johnny Depp", "Christoph Waltz"], + "actor_2": ["Joel David Moore", "Orlando Bloom", "Rory Kinnear"], + "actor_fb_likes_1": [1000.0, 40000.0, 11000.0], + "actor_fb_likes_2": [936.0, 5000.0, 393.0], + "title": ["Avatar", "Pirates of the Caribbean", "Spectre"], + } + ) + + expected = DataFrame( + { + "actor": [ + "CCH Pounder", + "Johnny Depp", + "Christoph Waltz", + "Joel David Moore", + "Orlando Bloom", + "Rory Kinnear", + ], + "actor_fb_likes": [1000.0, 40000.0, 11000.0, 936.0, 5000.0, 393.0], + "num": [1, 1, 1, 2, 2, 2], + "title": [ + "Avatar", + "Pirates of the Caribbean", + "Spectre", + "Avatar", + "Pirates of the Caribbean", + "Spectre", + ], + } + ).set_index(["title", "num"]) + result = wide_to_long( + df, ["actor", "actor_fb_likes"], i="title", j="num", sep="_" + ) + + tm.assert_frame_equal(result, expected) + + def test_identical_stubnames(self): + df = DataFrame( + { + "A2010": [1.0, 2.0], + "A2011": [3.0, 4.0], + "B2010": [5.0, 6.0], + "A": ["X1", "X2"], + } + ) + msg = "stubname can't be identical to a column name" + with pytest.raises(ValueError, match=msg): + wide_to_long(df, ["A", "B"], i="A", j="colname") + + def test_nonnumeric_suffix(self): + df = DataFrame( + { + "treatment_placebo": [1.0, 2.0], + "treatment_test": [3.0, 4.0], + "result_placebo": [5.0, 6.0], + "A": ["X1", "X2"], + } + ) + expected = DataFrame( + { + "A": ["X1", "X2", "X1", "X2"], + "colname": ["placebo", "placebo", "test", "test"], + "result": [5.0, 6.0, np.nan, np.nan], + "treatment": [1.0, 2.0, 3.0, 4.0], + } + ) + expected = expected.set_index(["A", "colname"]) + result = wide_to_long( + df, ["result", "treatment"], i="A", j="colname", suffix="[a-z]+", sep="_" + ) + tm.assert_frame_equal(result, expected) + + def test_mixed_type_suffix(self): + df = DataFrame( + { + "A": ["X1", "X2"], + "result_1": [0, 9], + "result_foo": [5.0, 6.0], + "treatment_1": [1.0, 2.0], + "treatment_foo": [3.0, 4.0], + } + ) + expected = DataFrame( + { + "A": ["X1", "X2", "X1", "X2"], + "colname": ["1", "1", "foo", "foo"], + "result": [0.0, 9.0, 5.0, 6.0], + "treatment": [1.0, 2.0, 3.0, 4.0], + } + ).set_index(["A", "colname"]) + result = wide_to_long( + df, ["result", "treatment"], i="A", j="colname", suffix=".+", sep="_" + ) + tm.assert_frame_equal(result, expected) + + def test_float_suffix(self): + df = DataFrame( + { + "treatment_1.1": [1.0, 2.0], + "treatment_2.1": [3.0, 4.0], + "result_1.2": [5.0, 6.0], + "result_1": [0, 9], + "A": ["X1", "X2"], + } + ) + expected = DataFrame( + { + "A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"], + "colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1], + "result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan], + "treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0], + } + ) + expected = expected.set_index(["A", 
"colname"]) + result = wide_to_long( + df, ["result", "treatment"], i="A", j="colname", suffix="[0-9.]+", sep="_" + ) + tm.assert_frame_equal(result, expected) + + def test_col_substring_of_stubname(self): + # GH22468 + # Don't raise ValueError when a column name is a substring + # of a stubname that's been passed as a string + wide_data = { + "node_id": {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}, + "A": {0: 0.80, 1: 0.0, 2: 0.25, 3: 1.0, 4: 0.81}, + "PA0": {0: 0.74, 1: 0.56, 2: 0.56, 3: 0.98, 4: 0.6}, + "PA1": {0: 0.77, 1: 0.64, 2: 0.52, 3: 0.98, 4: 0.67}, + "PA3": {0: 0.34, 1: 0.70, 2: 0.52, 3: 0.98, 4: 0.67}, + } + wide_df = DataFrame.from_dict(wide_data) + expected = wide_to_long(wide_df, stubnames=["PA"], i=["node_id", "A"], j="time") + result = wide_to_long(wide_df, stubnames="PA", i=["node_id", "A"], j="time") + tm.assert_frame_equal(result, expected) + + def test_raise_of_column_name_value(self): + # GH34731, enforced in 2.0 + # raise a ValueError if the resultant value column name matches + # a name in the dataframe already (default name is "value") + df = DataFrame({"col": list("ABC"), "value": range(10, 16, 2)}) + + with pytest.raises( + ValueError, match=re.escape("value_name (value) cannot match") + ): + df.melt(id_vars="value", value_name="value") + + @pytest.mark.parametrize("dtype", ["O", "string"]) + def test_missing_stubname(self, dtype): + # GH46044 + df = DataFrame({"id": ["1", "2"], "a-1": [100, 200], "a-2": [300, 400]}) + df = df.astype({"id": dtype}) + result = wide_to_long( + df, + stubnames=["a", "b"], + i="id", + j="num", + sep="-", + ) + index = Index( + [("1", 1), ("2", 1), ("1", 2), ("2", 2)], + name=("id", "num"), + ) + expected = DataFrame( + {"a": [100, 200, 300, 400], "b": [np.nan] * 4}, + index=index, + ) + new_level = expected.index.levels[0].astype(dtype) + expected.index = expected.index.set_levels(new_level, level=0) + tm.assert_frame_equal(result, expected) + + +def test_wide_to_long_pyarrow_string_columns(): + # GH 57066 + pytest.importorskip("pyarrow") + df = DataFrame( + { + "ID": {0: 1}, + "R_test1": {0: 1}, + "R_test2": {0: 1}, + "R_test3": {0: 2}, + "D": {0: 1}, + } + ) + df.columns = df.columns.astype("string[pyarrow_numpy]") + result = wide_to_long( + df, stubnames="R", i="ID", j="UNPIVOTED", sep="_", suffix=".*" + ) + expected = DataFrame( + [[1, 1], [1, 1], [1, 2]], + columns=Index(["D", "R"], dtype=object), + index=pd.MultiIndex.from_arrays( + [ + [1, 1, 1], + Index(["test1", "test2", "test3"], dtype="string[pyarrow_numpy]"), + ], + names=["ID", "UNPIVOTED"], + ), + ) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_pivot.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_pivot.py new file mode 100644 index 0000000000000000000000000000000000000000..18a449b4d0c67b55f24aa590229dddb0ed456054 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_pivot.py @@ -0,0 +1,2714 @@ +from datetime import ( + date, + datetime, + timedelta, +) +from itertools import product +import re + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Grouper, + Index, + MultiIndex, + Series, + concat, + date_range, +) +import pandas._testing as tm +from pandas.api.types import CategoricalDtype +from pandas.core.reshape import reshape as reshape_lib +from pandas.core.reshape.pivot import 
pivot_table + + +@pytest.fixture(params=[True, False]) +def dropna(request): + return request.param + + +@pytest.fixture(params=[([0] * 4, [1] * 4), (range(3), range(1, 4))]) +def interval_values(request, closed): + left, right = request.param + return Categorical(pd.IntervalIndex.from_arrays(left, right, closed)) + + +class TestPivotTable: + @pytest.fixture + def data(self): + return DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": np.random.default_rng(2).standard_normal(11), + } + ) + + def test_pivot_table(self, observed, data): + index = ["A", "B"] + columns = "C" + table = pivot_table( + data, values="D", index=index, columns=columns, observed=observed + ) + + table2 = data.pivot_table( + values="D", index=index, columns=columns, observed=observed + ) + tm.assert_frame_equal(table, table2) + + # this works + pivot_table(data, values="D", index=index, observed=observed) + + if len(index) > 1: + assert table.index.names == tuple(index) + else: + assert table.index.name == index[0] + + if len(columns) > 1: + assert table.columns.names == columns + else: + assert table.columns.name == columns[0] + + expected = data.groupby(index + [columns])["D"].agg("mean").unstack() + tm.assert_frame_equal(table, expected) + + def test_pivot_table_categorical_observed_equal(self, observed): + # issue #24923 + df = DataFrame( + {"col1": list("abcde"), "col2": list("fghij"), "col3": [1, 2, 3, 4, 5]} + ) + + expected = df.pivot_table( + index="col1", values="col3", columns="col2", aggfunc="sum", fill_value=0 + ) + + expected.index = expected.index.astype("category") + expected.columns = expected.columns.astype("category") + + df.col1 = df.col1.astype("category") + df.col2 = df.col2.astype("category") + + result = df.pivot_table( + index="col1", + values="col3", + columns="col2", + aggfunc="sum", + fill_value=0, + observed=observed, + ) + + tm.assert_frame_equal(result, expected) + + def test_pivot_table_nocols(self): + df = DataFrame( + {"rows": ["a", "b", "c"], "cols": ["x", "y", "z"], "values": [1, 2, 3]} + ) + rs = df.pivot_table(columns="cols", aggfunc="sum") + xp = df.pivot_table(index="cols", aggfunc="sum").T + tm.assert_frame_equal(rs, xp) + + rs = df.pivot_table(columns="cols", aggfunc={"values": "mean"}) + xp = df.pivot_table(index="cols", aggfunc={"values": "mean"}).T + tm.assert_frame_equal(rs, xp) + + def test_pivot_table_dropna(self): + df = DataFrame( + { + "amount": {0: 60000, 1: 100000, 2: 50000, 3: 30000}, + "customer": {0: "A", 1: "A", 2: "B", 3: "C"}, + "month": {0: 201307, 1: 201309, 2: 201308, 3: 201310}, + "product": {0: "a", 1: "b", 2: "c", 3: "d"}, + "quantity": {0: 2000000, 1: 500000, 2: 1000000, 3: 1000000}, + } + ) + pv_col = df.pivot_table( + "quantity", "month", ["customer", "product"], dropna=False + ) + pv_ind = df.pivot_table( + "quantity", ["customer", "product"], "month", dropna=False + ) + + m = MultiIndex.from_tuples( + [ + ("A", "a"), + ("A", "b"), + ("A", "c"), + ("A", "d"), + ("B", "a"), + ("B", "b"), + ("B", "c"), + ("B", "d"), + ("C", "a"), + ("C", "b"), + ("C", "c"), + ("C", "d"), + ], + names=["customer", "product"], + ) + 
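+ # dropna=False keeps every (customer, product) combination, including
+ # pairs that never co-occur in the data, so both orientations should
+ # carry the full 12-entry MultiIndex built above.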
tm.assert_index_equal(pv_col.columns, m) + tm.assert_index_equal(pv_ind.index, m) + + def test_pivot_table_categorical(self): + cat1 = Categorical( + ["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True + ) + cat2 = Categorical( + ["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True + ) + df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]}) + msg = "The default value of observed=False is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pivot_table(df, values="values", index=["A", "B"], dropna=True) + + exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"]) + expected = DataFrame({"values": [1.0, 2.0, 3.0, 4.0]}, index=exp_index) + tm.assert_frame_equal(result, expected) + + def test_pivot_table_dropna_categoricals(self, dropna): + # GH 15193 + categories = ["a", "b", "c", "d"] + + df = DataFrame( + { + "A": ["a", "a", "a", "b", "b", "b", "c", "c", "c"], + "B": [1, 2, 3, 1, 2, 3, 1, 2, 3], + "C": range(9), + } + ) + + df["A"] = df["A"].astype(CategoricalDtype(categories, ordered=False)) + msg = "The default value of observed=False is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.pivot_table(index="B", columns="A", values="C", dropna=dropna) + expected_columns = Series(["a", "b", "c"], name="A") + expected_columns = expected_columns.astype( + CategoricalDtype(categories, ordered=False) + ) + expected_index = Series([1, 2, 3], name="B") + expected = DataFrame( + [[0.0, 3.0, 6.0], [1.0, 4.0, 7.0], [2.0, 5.0, 8.0]], + index=expected_index, + columns=expected_columns, + ) + if not dropna: + # add back the non observed to compare + expected = expected.reindex(columns=Categorical(categories)).astype("float") + + tm.assert_frame_equal(result, expected) + + def test_pivot_with_non_observable_dropna(self, dropna): + # gh-21133 + df = DataFrame( + { + "A": Categorical( + [np.nan, "low", "high", "low", "high"], + categories=["low", "high"], + ordered=True, + ), + "B": [0.0, 1.0, 2.0, 3.0, 4.0], + } + ) + + msg = "The default value of observed=False is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.pivot_table(index="A", values="B", dropna=dropna) + if dropna: + values = [2.0, 3.0] + codes = [0, 1] + else: + # GH: 10772 + values = [2.0, 3.0, 0.0] + codes = [0, 1, -1] + expected = DataFrame( + {"B": values}, + index=Index( + Categorical.from_codes( + codes, categories=["low", "high"], ordered=dropna + ), + name="A", + ), + ) + + tm.assert_frame_equal(result, expected) + + def test_pivot_with_non_observable_dropna_multi_cat(self, dropna): + # gh-21378 + df = DataFrame( + { + "A": Categorical( + ["left", "low", "high", "low", "high"], + categories=["low", "high", "left"], + ordered=True, + ), + "B": range(5), + } + ) + + msg = "The default value of observed=False is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.pivot_table(index="A", values="B", dropna=dropna) + expected = DataFrame( + {"B": [2.0, 3.0, 0.0]}, + index=Index( + Categorical.from_codes( + [0, 1, 2], categories=["low", "high", "left"], ordered=True + ), + name="A", + ), + ) + if not dropna: + expected["B"] = expected["B"].astype(float) + + tm.assert_frame_equal(result, expected) + + def test_pivot_with_interval_index(self, interval_values, dropna): + # GH 25814 + df = DataFrame({"A": interval_values, "B": 1}) + + msg = "The default value of observed=False is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = 
df.pivot_table(index="A", values="B", dropna=dropna) + expected = DataFrame( + {"B": 1.0}, index=Index(interval_values.unique(), name="A") + ) + if not dropna: + expected = expected.astype(float) + tm.assert_frame_equal(result, expected) + + def test_pivot_with_interval_index_margins(self): + # GH 25815 + ordered_cat = pd.IntervalIndex.from_arrays([0, 0, 1, 1], [1, 1, 2, 2]) + df = DataFrame( + { + "A": np.arange(4, 0, -1, dtype=np.intp), + "B": ["a", "b", "a", "b"], + "C": Categorical(ordered_cat, ordered=True).sort_values( + ascending=False + ), + } + ) + + msg = "The default value of observed=False is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + pivot_tab = pivot_table( + df, index="C", columns="B", values="A", aggfunc="sum", margins=True + ) + + result = pivot_tab["All"] + expected = Series( + [3, 7, 10], + index=Index([pd.Interval(0, 1), pd.Interval(1, 2), "All"], name="C"), + name="All", + dtype=np.intp, + ) + tm.assert_series_equal(result, expected) + + def test_pass_array(self, data): + result = data.pivot_table("D", index=data.A, columns=data.C) + expected = data.pivot_table("D", index="A", columns="C") + tm.assert_frame_equal(result, expected) + + def test_pass_function(self, data): + result = data.pivot_table("D", index=lambda x: x // 5, columns=data.C) + expected = data.pivot_table("D", index=data.index // 5, columns="C") + tm.assert_frame_equal(result, expected) + + def test_pivot_table_multiple(self, data): + index = ["A", "B"] + columns = "C" + table = pivot_table(data, index=index, columns=columns) + expected = data.groupby(index + [columns]).agg("mean").unstack() + tm.assert_frame_equal(table, expected) + + def test_pivot_dtypes(self): + # can convert dtypes + f = DataFrame( + { + "a": ["cat", "bat", "cat", "bat"], + "v": [1, 2, 3, 4], + "i": ["a", "b", "a", "b"], + } + ) + assert f.dtypes["v"] == "int64" + + z = pivot_table( + f, values="v", index=["a"], columns=["i"], fill_value=0, aggfunc="sum" + ) + result = z.dtypes + expected = Series([np.dtype("int64")] * 2, index=Index(list("ab"), name="i")) + tm.assert_series_equal(result, expected) + + # cannot convert dtypes + f = DataFrame( + { + "a": ["cat", "bat", "cat", "bat"], + "v": [1.5, 2.5, 3.5, 4.5], + "i": ["a", "b", "a", "b"], + } + ) + assert f.dtypes["v"] == "float64" + + z = pivot_table( + f, values="v", index=["a"], columns=["i"], fill_value=0, aggfunc="mean" + ) + result = z.dtypes + expected = Series([np.dtype("float64")] * 2, index=Index(list("ab"), name="i")) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "columns,values", + [ + ("bool1", ["float1", "float2"]), + ("bool1", ["float1", "float2", "bool1"]), + ("bool2", ["float1", "float2", "bool1"]), + ], + ) + def test_pivot_preserve_dtypes(self, columns, values): + # GH 7142 regression test + v = np.arange(5, dtype=np.float64) + df = DataFrame( + {"float1": v, "float2": v + 2.0, "bool1": v <= 2, "bool2": v <= 3} + ) + + df_res = df.reset_index().pivot_table( + index="index", columns=columns, values=values + ) + + result = dict(df_res.dtypes) + expected = {col: np.dtype("float64") for col in df_res} + assert result == expected + + def test_pivot_no_values(self): + # GH 14380 + idx = pd.DatetimeIndex( + ["2011-01-01", "2011-02-01", "2011-01-02", "2011-01-01", "2011-01-02"] + ) + df = DataFrame({"A": [1, 2, 3, 4, 5]}, index=idx) + res = df.pivot_table(index=df.index.month, columns=df.index.day) + + exp_columns = MultiIndex.from_tuples([("A", 1), ("A", 2)]) + exp_columns = exp_columns.set_levels( + 
exp_columns.levels[1].astype(np.int32), level=1 + ) + exp = DataFrame( + [[2.5, 4.0], [2.0, np.nan]], + index=Index([1, 2], dtype=np.int32), + columns=exp_columns, + ) + tm.assert_frame_equal(res, exp) + + df = DataFrame( + { + "A": [1, 2, 3, 4, 5], + "dt": date_range("2011-01-01", freq="D", periods=5), + }, + index=idx, + ) + res = df.pivot_table(index=df.index.month, columns=Grouper(key="dt", freq="ME")) + exp_columns = MultiIndex.from_arrays( + [["A"], pd.DatetimeIndex(["2011-01-31"], dtype="M8[ns]")], + names=[None, "dt"], + ) + exp = DataFrame( + [3.25, 2.0], index=Index([1, 2], dtype=np.int32), columns=exp_columns + ) + tm.assert_frame_equal(res, exp) + + res = df.pivot_table( + index=Grouper(freq="YE"), columns=Grouper(key="dt", freq="ME") + ) + exp = DataFrame( + [3.0], + index=pd.DatetimeIndex(["2011-12-31"], freq="YE"), + columns=exp_columns, + ) + tm.assert_frame_equal(res, exp) + + def test_pivot_multi_values(self, data): + result = pivot_table( + data, values=["D", "E"], index="A", columns=["B", "C"], fill_value=0 + ) + expected = pivot_table( + data.drop(["F"], axis=1), index="A", columns=["B", "C"], fill_value=0 + ) + tm.assert_frame_equal(result, expected) + + def test_pivot_multi_functions(self, data): + f = lambda func: pivot_table( + data, values=["D", "E"], index=["A", "B"], columns="C", aggfunc=func + ) + result = f(["mean", "std"]) + means = f("mean") + stds = f("std") + expected = concat([means, stds], keys=["mean", "std"], axis=1) + tm.assert_frame_equal(result, expected) + + # margins=True should also work with a list of aggfuncs + f = lambda func: pivot_table( + data, + values=["D", "E"], + index=["A", "B"], + columns="C", + aggfunc=func, + margins=True, + ) + result = f(["mean", "std"]) + means = f("mean") + stds = f("std") + expected = concat([means, stds], keys=["mean", "std"], axis=1) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("method", [True, False]) + def test_pivot_index_with_nan(self, method): + # GH 3588 + nan = np.nan + df = DataFrame( + { + "a": ["R1", "R2", nan, "R4"], + "b": ["C1", "C2", "C3", "C4"], + "c": [10, 15, 17, 20], + } + ) + if method: + result = df.pivot(index="a", columns="b", values="c") + else: + result = pd.pivot(df, index="a", columns="b", values="c") + expected = DataFrame( + [ + [nan, nan, 17, nan], + [10, nan, nan, nan], + [nan, 15, nan, nan], + [nan, nan, nan, 20], + ], + index=Index([nan, "R1", "R2", "R4"], name="a"), + columns=Index(["C1", "C2", "C3", "C4"], name="b"), + ) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(df.pivot(index="b", columns="a", values="c"), expected.T) + + @pytest.mark.parametrize("method", [True, False]) + def test_pivot_index_with_nan_dates(self, method): + # GH9491 + df = DataFrame( + { + "a": date_range("2014-02-01", periods=6, freq="D"), + "c": 100 + np.arange(6), + } + ) + df["b"] = df["a"] - pd.Timestamp("2014-02-02") + df.loc[1, "a"] = df.loc[3, "a"] = np.nan + df.loc[1, "b"] = df.loc[4, "b"] = np.nan + + if method: + pv = df.pivot(index="a", columns="b", values="c") + else: + pv = pd.pivot(df, index="a", columns="b", values="c") + assert pv.notna().values.sum() == len(df) + + for _, row in df.iterrows(): + assert pv.loc[row["a"], row["b"]] == row["c"] + + if method: + result = df.pivot(index="b", columns="a", values="c") + else: + result = pd.pivot(df, index="b", columns="a", values="c") + tm.assert_frame_equal(result, pv.T) + + @pytest.mark.parametrize("method", [True, False]) + def test_pivot_with_tz(self, method, unit): + # GH 5878 + df = DataFrame( + { + "dt1": 
pd.DatetimeIndex( + [ + datetime(2013, 1, 1, 9, 0), + datetime(2013, 1, 2, 9, 0), + datetime(2013, 1, 1, 9, 0), + datetime(2013, 1, 2, 9, 0), + ], + dtype=f"M8[{unit}, US/Pacific]", + ), + "dt2": pd.DatetimeIndex( + [ + datetime(2014, 1, 1, 9, 0), + datetime(2014, 1, 1, 9, 0), + datetime(2014, 1, 2, 9, 0), + datetime(2014, 1, 2, 9, 0), + ], + dtype=f"M8[{unit}, Asia/Tokyo]", + ), + "data1": np.arange(4, dtype="int64"), + "data2": np.arange(4, dtype="int64"), + } + ) + + exp_col1 = Index(["data1", "data1", "data2", "data2"]) + exp_col2 = pd.DatetimeIndex( + ["2014/01/01 09:00", "2014/01/02 09:00"] * 2, + name="dt2", + dtype=f"M8[{unit}, Asia/Tokyo]", + ) + exp_col = MultiIndex.from_arrays([exp_col1, exp_col2]) + exp_idx = pd.DatetimeIndex( + ["2013/01/01 09:00", "2013/01/02 09:00"], + name="dt1", + dtype=f"M8[{unit}, US/Pacific]", + ) + expected = DataFrame( + [[0, 2, 0, 2], [1, 3, 1, 3]], + index=exp_idx, + columns=exp_col, + ) + + if method: + pv = df.pivot(index="dt1", columns="dt2") + else: + pv = pd.pivot(df, index="dt1", columns="dt2") + tm.assert_frame_equal(pv, expected) + + expected = DataFrame( + [[0, 2], [1, 3]], + index=exp_idx, + columns=exp_col2[:2], + ) + + if method: + pv = df.pivot(index="dt1", columns="dt2", values="data1") + else: + pv = pd.pivot(df, index="dt1", columns="dt2", values="data1") + tm.assert_frame_equal(pv, expected) + + def test_pivot_tz_in_values(self): + # GH 14948 + df = DataFrame( + [ + { + "uid": "aa", + "ts": pd.Timestamp("2016-08-12 13:00:00-0700", tz="US/Pacific"), + }, + { + "uid": "aa", + "ts": pd.Timestamp("2016-08-12 08:00:00-0700", tz="US/Pacific"), + }, + { + "uid": "aa", + "ts": pd.Timestamp("2016-08-12 14:00:00-0700", tz="US/Pacific"), + }, + { + "uid": "aa", + "ts": pd.Timestamp("2016-08-25 11:00:00-0700", tz="US/Pacific"), + }, + { + "uid": "aa", + "ts": pd.Timestamp("2016-08-25 13:00:00-0700", tz="US/Pacific"), + }, + ] + ) + + df = df.set_index("ts").reset_index() + mins = df.ts.map(lambda x: x.replace(hour=0, minute=0, second=0, microsecond=0)) + + result = pivot_table( + df.set_index("ts").reset_index(), + values="ts", + index=["uid"], + columns=[mins], + aggfunc="min", + ) + expected = DataFrame( + [ + [ + pd.Timestamp("2016-08-12 08:00:00-0700", tz="US/Pacific"), + pd.Timestamp("2016-08-25 11:00:00-0700", tz="US/Pacific"), + ] + ], + index=Index(["aa"], name="uid"), + columns=pd.DatetimeIndex( + [ + pd.Timestamp("2016-08-12 00:00:00", tz="US/Pacific"), + pd.Timestamp("2016-08-25 00:00:00", tz="US/Pacific"), + ], + name="ts", + ), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("method", [True, False]) + def test_pivot_periods(self, method): + df = DataFrame( + { + "p1": [ + pd.Period("2013-01-01", "D"), + pd.Period("2013-01-02", "D"), + pd.Period("2013-01-01", "D"), + pd.Period("2013-01-02", "D"), + ], + "p2": [ + pd.Period("2013-01", "M"), + pd.Period("2013-01", "M"), + pd.Period("2013-02", "M"), + pd.Period("2013-02", "M"), + ], + "data1": np.arange(4, dtype="int64"), + "data2": np.arange(4, dtype="int64"), + } + ) + + exp_col1 = Index(["data1", "data1", "data2", "data2"]) + exp_col2 = pd.PeriodIndex(["2013-01", "2013-02"] * 2, name="p2", freq="M") + exp_col = MultiIndex.from_arrays([exp_col1, exp_col2]) + expected = DataFrame( + [[0, 2, 0, 2], [1, 3, 1, 3]], + index=pd.PeriodIndex(["2013-01-01", "2013-01-02"], name="p1", freq="D"), + columns=exp_col, + ) + if method: + pv = df.pivot(index="p1", columns="p2") + else: + pv = pd.pivot(df, index="p1", columns="p2") + tm.assert_frame_equal(pv, expected) + + 
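+ # With values="data1" the column index collapses to the plain "p2"
+ # PeriodIndex, while the "p1" PeriodIndex rows are unchanged.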
expected = DataFrame( + [[0, 2], [1, 3]], + index=pd.PeriodIndex(["2013-01-01", "2013-01-02"], name="p1", freq="D"), + columns=pd.PeriodIndex(["2013-01", "2013-02"], name="p2", freq="M"), + ) + if method: + pv = df.pivot(index="p1", columns="p2", values="data1") + else: + pv = pd.pivot(df, index="p1", columns="p2", values="data1") + tm.assert_frame_equal(pv, expected) + + def test_pivot_periods_with_margins(self): + # GH 28323 + df = DataFrame( + { + "a": [1, 1, 2, 2], + "b": [ + pd.Period("2019Q1"), + pd.Period("2019Q2"), + pd.Period("2019Q1"), + pd.Period("2019Q2"), + ], + "x": 1.0, + } + ) + + expected = DataFrame( + data=1.0, + index=Index([1, 2, "All"], name="a"), + columns=Index([pd.Period("2019Q1"), pd.Period("2019Q2"), "All"], name="b"), + ) + + result = df.pivot_table(index="a", columns="b", values="x", margins=True) + tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize( + "values", + [ + ["baz", "zoo"], + np.array(["baz", "zoo"]), + Series(["baz", "zoo"]), + Index(["baz", "zoo"]), + ], + ) + @pytest.mark.parametrize("method", [True, False]) + def test_pivot_with_list_like_values(self, values, method): + # issue #17160 + df = DataFrame( + { + "foo": ["one", "one", "one", "two", "two", "two"], + "bar": ["A", "B", "C", "A", "B", "C"], + "baz": [1, 2, 3, 4, 5, 6], + "zoo": ["x", "y", "z", "q", "w", "t"], + } + ) + + if method: + result = df.pivot(index="foo", columns="bar", values=values) + else: + result = pd.pivot(df, index="foo", columns="bar", values=values) + + data = [[1, 2, 3, "x", "y", "z"], [4, 5, 6, "q", "w", "t"]] + index = Index(data=["one", "two"], name="foo") + columns = MultiIndex( + levels=[["baz", "zoo"], ["A", "B", "C"]], + codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], + names=[None, "bar"], + ) + expected = DataFrame(data=data, index=index, columns=columns) + expected["baz"] = expected["baz"].astype(object) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "values", + [ + ["bar", "baz"], + np.array(["bar", "baz"]), + Series(["bar", "baz"]), + Index(["bar", "baz"]), + ], + ) + @pytest.mark.parametrize("method", [True, False]) + def test_pivot_with_list_like_values_nans(self, values, method): + # issue #17160 + df = DataFrame( + { + "foo": ["one", "one", "one", "two", "two", "two"], + "bar": ["A", "B", "C", "A", "B", "C"], + "baz": [1, 2, 3, 4, 5, 6], + "zoo": ["x", "y", "z", "q", "w", "t"], + } + ) + + if method: + result = df.pivot(index="zoo", columns="foo", values=values) + else: + result = pd.pivot(df, index="zoo", columns="foo", values=values) + + data = [ + [np.nan, "A", np.nan, 4], + [np.nan, "C", np.nan, 6], + [np.nan, "B", np.nan, 5], + ["A", np.nan, 1, np.nan], + ["B", np.nan, 2, np.nan], + ["C", np.nan, 3, np.nan], + ] + index = Index(data=["q", "t", "w", "x", "y", "z"], name="zoo") + columns = MultiIndex( + levels=[["bar", "baz"], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=[None, "foo"], + ) + expected = DataFrame(data=data, index=index, columns=columns) + expected["baz"] = expected["baz"].astype(object) + tm.assert_frame_equal(result, expected) + + def test_pivot_columns_none_raise_error(self): + # GH 30924 + df = DataFrame({"col1": ["a", "b", "c"], "col2": [1, 2, 3], "col3": [1, 2, 3]}) + msg = r"pivot\(\) missing 1 required keyword-only argument: 'columns'" + with pytest.raises(TypeError, match=msg): + df.pivot(index="col1", values="col3") # pylint: disable=missing-kwoa + + @pytest.mark.xfail( + reason="MultiIndexed unstack with tuple names fails with KeyError GH#19966" + ) + 
@pytest.mark.parametrize("method", [True, False]) + def test_pivot_with_multiindex(self, method): + # issue #17160 + index = Index(data=[0, 1, 2, 3, 4, 5]) + data = [ + ["one", "A", 1, "x"], + ["one", "B", 2, "y"], + ["one", "C", 3, "z"], + ["two", "A", 4, "q"], + ["two", "B", 5, "w"], + ["two", "C", 6, "t"], + ] + columns = MultiIndex( + levels=[["bar", "baz"], ["first", "second"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + ) + df = DataFrame(data=data, index=index, columns=columns, dtype="object") + if method: + result = df.pivot( + index=("bar", "first"), + columns=("bar", "second"), + values=("baz", "first"), + ) + else: + result = pd.pivot( + df, + index=("bar", "first"), + columns=("bar", "second"), + values=("baz", "first"), + ) + + data = { + "A": Series([1, 4], index=["one", "two"]), + "B": Series([2, 5], index=["one", "two"]), + "C": Series([3, 6], index=["one", "two"]), + } + expected = DataFrame(data) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("method", [True, False]) + def test_pivot_with_tuple_of_values(self, method): + # issue #17160 + df = DataFrame( + { + "foo": ["one", "one", "one", "two", "two", "two"], + "bar": ["A", "B", "C", "A", "B", "C"], + "baz": [1, 2, 3, 4, 5, 6], + "zoo": ["x", "y", "z", "q", "w", "t"], + } + ) + with pytest.raises(KeyError, match=r"^\('bar', 'baz'\)$"): + # tuple is seen as a single column name + if method: + df.pivot(index="zoo", columns="foo", values=("bar", "baz")) + else: + pd.pivot(df, index="zoo", columns="foo", values=("bar", "baz")) + + def _check_output( + self, + result, + values_col, + data, + index=["A", "B"], + columns=["C"], + margins_col="All", + ): + col_margins = result.loc[result.index[:-1], margins_col] + expected_col_margins = data.groupby(index)[values_col].mean() + tm.assert_series_equal(col_margins, expected_col_margins, check_names=False) + assert col_margins.name == margins_col + + result = result.sort_index() + index_margins = result.loc[(margins_col, "")].iloc[:-1] + + expected_ix_margins = data.groupby(columns)[values_col].mean() + tm.assert_series_equal(index_margins, expected_ix_margins, check_names=False) + assert index_margins.name == (margins_col, "") + + grand_total_margins = result.loc[(margins_col, ""), margins_col] + expected_total_margins = data[values_col].mean() + assert grand_total_margins == expected_total_margins + + def test_margins(self, data): + # column specified + result = data.pivot_table( + values="D", index=["A", "B"], columns="C", margins=True, aggfunc="mean" + ) + self._check_output(result, "D", data) + + # Set a different margins_name (not 'All') + result = data.pivot_table( + values="D", + index=["A", "B"], + columns="C", + margins=True, + aggfunc="mean", + margins_name="Totals", + ) + self._check_output(result, "D", data, margins_col="Totals") + + # no column specified + table = data.pivot_table( + index=["A", "B"], columns="C", margins=True, aggfunc="mean" + ) + for value_col in table.columns.levels[0]: + self._check_output(table[value_col], value_col, data) + + def test_no_col(self, data): + # no col + + # to help with a buglet + data.columns = [k * 2 for k in data.columns] + msg = re.escape("agg function failed [how->mean,dtype->") + with pytest.raises(TypeError, match=msg): + data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean") + table = data.drop(columns="CC").pivot_table( + index=["AA", "BB"], margins=True, aggfunc="mean" + ) + for value_col in table.columns: + totals = table.loc[("All", ""), value_col] + assert totals == data[value_col].mean() 
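+
+ # Same check again, asserting against the three renamed value columns
+ # explicitly instead of iterating over table.columns.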
+ + with pytest.raises(TypeError, match=msg): + data.pivot_table(index=["AA", "BB"], margins=True, aggfunc="mean") + table = data.drop(columns="CC").pivot_table( + index=["AA", "BB"], margins=True, aggfunc="mean" + ) + for item in ["DD", "EE", "FF"]: + totals = table.loc[("All", ""), item] + assert totals == data[item].mean() + + @pytest.mark.parametrize( + "columns, aggfunc, values, expected_columns", + [ + ( + "A", + "mean", + [[5.5, 5.5, 2.2, 2.2], [8.0, 8.0, 4.4, 4.4]], + Index(["bar", "All", "foo", "All"], name="A"), + ), + ( + ["A", "B"], + "sum", + [ + [9, 13, 22, 5, 6, 11], + [14, 18, 32, 11, 11, 22], + ], + MultiIndex.from_tuples( + [ + ("bar", "one"), + ("bar", "two"), + ("bar", "All"), + ("foo", "one"), + ("foo", "two"), + ("foo", "All"), + ], + names=["A", "B"], + ), + ), + ], + ) + def test_margin_with_only_columns_defined( + self, columns, aggfunc, values, expected_columns + ): + # GH 31016 + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + if aggfunc != "sum": + msg = re.escape("agg function failed [how->mean,dtype->") + with pytest.raises(TypeError, match=msg): + df.pivot_table(columns=columns, margins=True, aggfunc=aggfunc) + if "B" not in columns: + df = df.drop(columns="B") + result = df.drop(columns="C").pivot_table( + columns=columns, margins=True, aggfunc=aggfunc + ) + expected = DataFrame(values, index=Index(["D", "E"]), columns=expected_columns) + + tm.assert_frame_equal(result, expected) + + def test_margins_dtype(self, data): + # GH 17013 + + df = data.copy() + df[["D", "E", "F"]] = np.arange(len(df) * 3).reshape(len(df), 3).astype("i8") + + mi_val = list(product(["bar", "foo"], ["one", "two"])) + [("All", "")] + mi = MultiIndex.from_tuples(mi_val, names=("A", "B")) + expected = DataFrame( + {"dull": [12, 21, 3, 9, 45], "shiny": [33, 0, 36, 51, 120]}, index=mi + ).rename_axis("C", axis=1) + expected["All"] = expected["dull"] + expected["shiny"] + + result = df.pivot_table( + values="D", + index=["A", "B"], + columns="C", + margins=True, + aggfunc="sum", + fill_value=0, + ) + + tm.assert_frame_equal(expected, result) + + def test_margins_dtype_len(self, data): + mi_val = list(product(["bar", "foo"], ["one", "two"])) + [("All", "")] + mi = MultiIndex.from_tuples(mi_val, names=("A", "B")) + expected = DataFrame( + {"dull": [1, 1, 2, 1, 5], "shiny": [2, 0, 2, 2, 6]}, index=mi + ).rename_axis("C", axis=1) + expected["All"] = expected["dull"] + expected["shiny"] + + result = data.pivot_table( + values="D", + index=["A", "B"], + columns="C", + margins=True, + aggfunc=len, + fill_value=0, + ) + + tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize("cols", [(1, 2), ("a", "b"), (1, "b"), ("a", 1)]) + def test_pivot_table_multiindex_only(self, cols): + # GH 17038 + df2 = DataFrame({cols[0]: [1, 2, 3], cols[1]: [1, 2, 3], "v": [4, 5, 6]}) + + result = df2.pivot_table(values="v", columns=cols) + expected = DataFrame( + [[4.0, 5.0, 6.0]], + columns=MultiIndex.from_tuples([(1, 1), (2, 2), (3, 3)], names=cols), + index=Index(["v"], dtype=object), + ) + + tm.assert_frame_equal(result, expected) + + def test_pivot_table_retains_tz(self): + dti = date_range("2016-01-01", periods=3, tz="Europe/Amsterdam") + df = DataFrame( + { + "A": 
np.random.default_rng(2).standard_normal(3), + "B": np.random.default_rng(2).standard_normal(3), + "C": dti, + } + ) + result = df.pivot_table(index=["B", "C"], dropna=False) + + # check tz retention + assert result.index.levels[1].equals(dti) + + def test_pivot_integer_columns(self): + # caused by upstream bug in unstack + + d = date.min + data = list( + product( + ["foo", "bar"], + ["A", "B", "C"], + ["x1", "x2"], + [d + timedelta(i) for i in range(20)], + [1.0], + ) + ) + df = DataFrame(data) + table = df.pivot_table(values=4, index=[0, 1, 3], columns=[2]) + + df2 = df.rename(columns=str) + table2 = df2.pivot_table(values="4", index=["0", "1", "3"], columns=["2"]) + + tm.assert_frame_equal(table, table2, check_names=False) + + def test_pivot_no_level_overlap(self): + # GH #1181 + + data = DataFrame( + { + "a": ["a", "a", "a", "a", "b", "b", "b", "b"] * 2, + "b": [0, 0, 0, 0, 1, 1, 1, 1] * 2, + "c": (["foo"] * 4 + ["bar"] * 4) * 2, + "value": np.random.default_rng(2).standard_normal(16), + } + ) + + table = data.pivot_table("value", index="a", columns=["b", "c"]) + + grouped = data.groupby(["a", "b", "c"])["value"].mean() + expected = grouped.unstack("b").unstack("c").dropna(axis=1, how="all") + tm.assert_frame_equal(table, expected) + + def test_pivot_columns_lexsorted(self): + n = 10000 + + dtype = np.dtype( + [ + ("Index", object), + ("Symbol", object), + ("Year", int), + ("Month", int), + ("Day", int), + ("Quantity", int), + ("Price", float), + ] + ) + + products = np.array( + [ + ("SP500", "ADBE"), + ("SP500", "NVDA"), + ("SP500", "ORCL"), + ("NDQ100", "AAPL"), + ("NDQ100", "MSFT"), + ("NDQ100", "GOOG"), + ("FTSE", "DGE.L"), + ("FTSE", "TSCO.L"), + ("FTSE", "GSK.L"), + ], + dtype=[("Index", object), ("Symbol", object)], + ) + items = np.empty(n, dtype=dtype) + iproduct = np.random.default_rng(2).integers(0, len(products), n) + items["Index"] = products["Index"][iproduct] + items["Symbol"] = products["Symbol"][iproduct] + dr = date_range(date(2000, 1, 1), date(2010, 12, 31)) + dates = dr[np.random.default_rng(2).integers(0, len(dr), n)] + items["Year"] = dates.year + items["Month"] = dates.month + items["Day"] = dates.day + items["Price"] = np.random.default_rng(2).lognormal(4.0, 2.0, n) + + df = DataFrame(items) + + pivoted = df.pivot_table( + "Price", + index=["Month", "Day"], + columns=["Index", "Symbol", "Year"], + aggfunc="mean", + ) + + assert pivoted.columns.is_monotonic_increasing + + def test_pivot_complex_aggfunc(self, data): + f = {"D": ["std"], "E": ["sum"]} + expected = data.groupby(["A", "B"]).agg(f).unstack("B") + result = data.pivot_table(index="A", columns="B", aggfunc=f) + + tm.assert_frame_equal(result, expected) + + def test_margins_no_values_no_cols(self, data): + # Regression test on pivot table: no values or cols passed. 
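+ # With aggfunc=len and margins=True the result is a Series of group
+ # sizes whose trailing "All" entry should equal the sum of the rest.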
+ result = data[["A", "B"]].pivot_table( + index=["A", "B"], aggfunc=len, margins=True + ) + result_list = result.tolist() + assert sum(result_list[:-1]) == result_list[-1] + + def test_margins_no_values_two_rows(self, data): + # Regression test on pivot table: no values passed but rows are a + # multi-index + result = data[["A", "B", "C"]].pivot_table( + index=["A", "B"], columns="C", aggfunc=len, margins=True + ) + assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0] + + def test_margins_no_values_one_row_one_col(self, data): + # Regression test on pivot table: no values passed but row and col + # defined + result = data[["A", "B"]].pivot_table( + index="A", columns="B", aggfunc=len, margins=True + ) + assert result.All.tolist() == [4.0, 7.0, 11.0] + + def test_margins_no_values_two_row_two_cols(self, data): + # Regression test on pivot table: no values passed but rows and cols + # are multi-indexed + data["D"] = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"] + result = data[["A", "B", "C", "D"]].pivot_table( + index=["A", "B"], columns=["C", "D"], aggfunc=len, margins=True + ) + assert result.All.tolist() == [3.0, 1.0, 4.0, 3.0, 11.0] + + @pytest.mark.parametrize("margin_name", ["foo", "one", 666, None, ["a", "b"]]) + def test_pivot_table_with_margins_set_margin_name(self, margin_name, data): + # see gh-3335 + msg = ( + f'Conflicting name "{margin_name}" in margins|' + "margins_name argument must be a string" + ) + with pytest.raises(ValueError, match=msg): + # multi-index index + pivot_table( + data, + values="D", + index=["A", "B"], + columns=["C"], + margins=True, + margins_name=margin_name, + ) + with pytest.raises(ValueError, match=msg): + # multi-index column + pivot_table( + data, + values="D", + index=["C"], + columns=["A", "B"], + margins=True, + margins_name=margin_name, + ) + with pytest.raises(ValueError, match=msg): + # non-multi-index index/column + pivot_table( + data, + values="D", + index=["A"], + columns=["B"], + margins=True, + margins_name=margin_name, + ) + + def test_pivot_timegrouper(self, using_array_manager): + df = DataFrame( + { + "Branch": "A A A A A A A B".split(), + "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(), + "Quantity": [1, 3, 5, 1, 8, 1, 9, 3], + "Date": [ + datetime(2013, 1, 1), + datetime(2013, 1, 1), + datetime(2013, 10, 1), + datetime(2013, 10, 2), + datetime(2013, 10, 1), + datetime(2013, 10, 2), + datetime(2013, 12, 2), + datetime(2013, 12, 2), + ], + } + ).set_index("Date") + + expected = DataFrame( + np.array([10, 18, 3], dtype="int64").reshape(1, 3), + index=pd.DatetimeIndex([datetime(2013, 12, 31)], freq="YE"), + columns="Carl Joe Mark".split(), + ) + expected.index.name = "Date" + expected.columns.name = "Buyer" + + result = pivot_table( + df, + index=Grouper(freq="YE"), + columns="Buyer", + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected) + + result = pivot_table( + df, + index="Buyer", + columns=Grouper(freq="YE"), + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected.T) + + expected = DataFrame( + np.array([1, np.nan, 3, 9, 18, np.nan]).reshape(2, 3), + index=pd.DatetimeIndex( + [datetime(2013, 1, 1), datetime(2013, 7, 1)], freq="6MS" + ), + columns="Carl Joe Mark".split(), + ) + expected.index.name = "Date" + expected.columns.name = "Buyer" + if using_array_manager: + # INFO(ArrayManager) column without NaNs can preserve int dtype + expected["Carl"] = expected["Carl"].astype("int64") + + result = pivot_table( + df, + index=Grouper(freq="6MS"), + 
columns="Buyer", + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected) + + result = pivot_table( + df, + index="Buyer", + columns=Grouper(freq="6MS"), + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected.T) + + # passing the name + df = df.reset_index() + result = pivot_table( + df, + index=Grouper(freq="6MS", key="Date"), + columns="Buyer", + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected) + + result = pivot_table( + df, + index="Buyer", + columns=Grouper(freq="6MS", key="Date"), + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected.T) + + msg = "'The grouper name foo is not found'" + with pytest.raises(KeyError, match=msg): + pivot_table( + df, + index=Grouper(freq="6MS", key="foo"), + columns="Buyer", + values="Quantity", + aggfunc="sum", + ) + with pytest.raises(KeyError, match=msg): + pivot_table( + df, + index="Buyer", + columns=Grouper(freq="6MS", key="foo"), + values="Quantity", + aggfunc="sum", + ) + + # passing the level + df = df.set_index("Date") + result = pivot_table( + df, + index=Grouper(freq="6MS", level="Date"), + columns="Buyer", + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected) + + result = pivot_table( + df, + index="Buyer", + columns=Grouper(freq="6MS", level="Date"), + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected.T) + + msg = "The level foo is not valid" + with pytest.raises(ValueError, match=msg): + pivot_table( + df, + index=Grouper(freq="6MS", level="foo"), + columns="Buyer", + values="Quantity", + aggfunc="sum", + ) + with pytest.raises(ValueError, match=msg): + pivot_table( + df, + index="Buyer", + columns=Grouper(freq="6MS", level="foo"), + values="Quantity", + aggfunc="sum", + ) + + def test_pivot_timegrouper_double(self): + # double grouper + df = DataFrame( + { + "Branch": "A A A A A A A B".split(), + "Buyer": "Carl Mark Carl Carl Joe Joe Joe Carl".split(), + "Quantity": [1, 3, 5, 1, 8, 1, 9, 3], + "Date": [ + datetime(2013, 11, 1, 13, 0), + datetime(2013, 9, 1, 13, 5), + datetime(2013, 10, 1, 20, 0), + datetime(2013, 10, 2, 10, 0), + datetime(2013, 11, 1, 20, 0), + datetime(2013, 10, 2, 10, 0), + datetime(2013, 10, 2, 12, 0), + datetime(2013, 12, 5, 14, 0), + ], + "PayDay": [ + datetime(2013, 10, 4, 0, 0), + datetime(2013, 10, 15, 13, 5), + datetime(2013, 9, 5, 20, 0), + datetime(2013, 11, 2, 10, 0), + datetime(2013, 10, 7, 20, 0), + datetime(2013, 9, 5, 10, 0), + datetime(2013, 12, 30, 12, 0), + datetime(2013, 11, 20, 14, 0), + ], + } + ) + + result = pivot_table( + df, + index=Grouper(freq="ME", key="Date"), + columns=Grouper(freq="ME", key="PayDay"), + values="Quantity", + aggfunc="sum", + ) + expected = DataFrame( + np.array( + [ + np.nan, + 3, + np.nan, + np.nan, + 6, + np.nan, + 1, + 9, + np.nan, + 9, + np.nan, + np.nan, + np.nan, + np.nan, + 3, + np.nan, + ] + ).reshape(4, 4), + index=pd.DatetimeIndex( + [ + datetime(2013, 9, 30), + datetime(2013, 10, 31), + datetime(2013, 11, 30), + datetime(2013, 12, 31), + ], + freq="ME", + ), + columns=pd.DatetimeIndex( + [ + datetime(2013, 9, 30), + datetime(2013, 10, 31), + datetime(2013, 11, 30), + datetime(2013, 12, 31), + ], + freq="ME", + ), + ) + expected.index.name = "Date" + expected.columns.name = "PayDay" + + tm.assert_frame_equal(result, expected) + + result = pivot_table( + df, + index=Grouper(freq="ME", key="PayDay"), + columns=Grouper(freq="ME", key="Date"), + values="Quantity", + aggfunc="sum", + ) + 
tm.assert_frame_equal(result, expected.T) + + tuples = [ + (datetime(2013, 9, 30), datetime(2013, 10, 31)), + (datetime(2013, 10, 31), datetime(2013, 9, 30)), + (datetime(2013, 10, 31), datetime(2013, 11, 30)), + (datetime(2013, 10, 31), datetime(2013, 12, 31)), + (datetime(2013, 11, 30), datetime(2013, 10, 31)), + (datetime(2013, 12, 31), datetime(2013, 11, 30)), + ] + idx = MultiIndex.from_tuples(tuples, names=["Date", "PayDay"]) + expected = DataFrame( + np.array( + [3, np.nan, 6, np.nan, 1, np.nan, 9, np.nan, 9, np.nan, np.nan, 3] + ).reshape(6, 2), + index=idx, + columns=["A", "B"], + ) + expected.columns.name = "Branch" + + result = pivot_table( + df, + index=[Grouper(freq="ME", key="Date"), Grouper(freq="ME", key="PayDay")], + columns=["Branch"], + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected) + + result = pivot_table( + df, + index=["Branch"], + columns=[Grouper(freq="ME", key="Date"), Grouper(freq="ME", key="PayDay")], + values="Quantity", + aggfunc="sum", + ) + tm.assert_frame_equal(result, expected.T) + + def test_pivot_datetime_tz(self): + dates1 = pd.DatetimeIndex( + [ + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + ], + dtype="M8[ns, US/Pacific]", + name="dt1", + ) + dates2 = pd.DatetimeIndex( + [ + "2013-01-01 15:00:00", + "2013-01-01 15:00:00", + "2013-01-01 15:00:00", + "2013-02-01 15:00:00", + "2013-02-01 15:00:00", + "2013-02-01 15:00:00", + ], + dtype="M8[ns, Asia/Tokyo]", + ) + df = DataFrame( + { + "label": ["a", "a", "a", "b", "b", "b"], + "dt1": dates1, + "dt2": dates2, + "value1": np.arange(6, dtype="int64"), + "value2": [1, 2] * 3, + } + ) + + exp_idx = dates1[:3] + exp_col1 = Index(["value1", "value1"]) + exp_col2 = Index(["a", "b"], name="label") + exp_col = MultiIndex.from_arrays([exp_col1, exp_col2]) + expected = DataFrame( + [[0.0, 3.0], [1.0, 4.0], [2.0, 5.0]], index=exp_idx, columns=exp_col + ) + result = pivot_table(df, index=["dt1"], columns=["label"], values=["value1"]) + tm.assert_frame_equal(result, expected) + + exp_col1 = Index(["sum", "sum", "sum", "sum", "mean", "mean", "mean", "mean"]) + exp_col2 = Index(["value1", "value1", "value2", "value2"] * 2) + exp_col3 = pd.DatetimeIndex( + ["2013-01-01 15:00:00", "2013-02-01 15:00:00"] * 4, + dtype="M8[ns, Asia/Tokyo]", + name="dt2", + ) + exp_col = MultiIndex.from_arrays([exp_col1, exp_col2, exp_col3]) + expected1 = DataFrame( + np.array( + [ + [ + 0, + 3, + 1, + 2, + ], + [1, 4, 2, 1], + [2, 5, 1, 2], + ], + dtype="int64", + ), + index=exp_idx, + columns=exp_col[:4], + ) + expected2 = DataFrame( + np.array( + [ + [0.0, 3.0, 1.0, 2.0], + [1.0, 4.0, 2.0, 1.0], + [2.0, 5.0, 1.0, 2.0], + ], + ), + index=exp_idx, + columns=exp_col[4:], + ) + expected = concat([expected1, expected2], axis=1) + + result = pivot_table( + df, + index=["dt1"], + columns=["dt2"], + values=["value1", "value2"], + aggfunc=["sum", "mean"], + ) + tm.assert_frame_equal(result, expected) + + def test_pivot_dtaccessor(self): + # GH 8103 + dates1 = pd.DatetimeIndex( + [ + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + "2011-07-19 07:00:00", + "2011-07-19 08:00:00", + "2011-07-19 09:00:00", + ] + ) + dates2 = pd.DatetimeIndex( + [ + "2013-01-01 15:00:00", + "2013-01-01 15:00:00", + "2013-01-01 15:00:00", + "2013-02-01 15:00:00", + "2013-02-01 15:00:00", + "2013-02-01 15:00:00", + ] + ) + df = DataFrame( + { + "label": ["a", "a", "a", "b", "b", "b"], + "dt1": dates1, + "dt2": 
dates2, + "value1": np.arange(6, dtype="int64"), + "value2": [1, 2] * 3, + } + ) + + result = pivot_table( + df, index="label", columns=df["dt1"].dt.hour, values="value1" + ) + + exp_idx = Index(["a", "b"], name="label") + expected = DataFrame( + {7: [0.0, 3.0], 8: [1.0, 4.0], 9: [2.0, 5.0]}, + index=exp_idx, + columns=Index([7, 8, 9], dtype=np.int32, name="dt1"), + ) + tm.assert_frame_equal(result, expected) + + result = pivot_table( + df, index=df["dt2"].dt.month, columns=df["dt1"].dt.hour, values="value1" + ) + + expected = DataFrame( + {7: [0.0, 3.0], 8: [1.0, 4.0], 9: [2.0, 5.0]}, + index=Index([1, 2], dtype=np.int32, name="dt2"), + columns=Index([7, 8, 9], dtype=np.int32, name="dt1"), + ) + tm.assert_frame_equal(result, expected) + + result = pivot_table( + df, + index=df["dt2"].dt.year.values, + columns=[df["dt1"].dt.hour, df["dt2"].dt.month], + values="value1", + ) + + exp_col = MultiIndex.from_arrays( + [ + np.array([7, 7, 8, 8, 9, 9], dtype=np.int32), + np.array([1, 2] * 3, dtype=np.int32), + ], + names=["dt1", "dt2"], + ) + expected = DataFrame( + np.array([[0.0, 3.0, 1.0, 4.0, 2.0, 5.0]]), + index=Index([2013], dtype=np.int32), + columns=exp_col, + ) + tm.assert_frame_equal(result, expected) + + result = pivot_table( + df, + index=np.array(["X", "X", "X", "X", "Y", "Y"]), + columns=[df["dt1"].dt.hour, df["dt2"].dt.month], + values="value1", + ) + expected = DataFrame( + np.array( + [[0, 3, 1, np.nan, 2, np.nan], [np.nan, np.nan, np.nan, 4, np.nan, 5]] + ), + index=["X", "Y"], + columns=exp_col, + ) + tm.assert_frame_equal(result, expected) + + def test_daily(self): + rng = date_range("1/1/2000", "12/31/2004", freq="D") + ts = Series(np.arange(len(rng)), index=rng) + + result = pivot_table( + DataFrame(ts), index=ts.index.year, columns=ts.index.dayofyear + ) + result.columns = result.columns.droplevel(0) + + doy = np.asarray(ts.index.dayofyear) + + expected = {} + for y in ts.index.year.unique().values: + mask = ts.index.year == y + expected[y] = Series(ts.values[mask], index=doy[mask]) + expected = DataFrame(expected, dtype=float).T + tm.assert_frame_equal(result, expected) + + def test_monthly(self): + rng = date_range("1/1/2000", "12/31/2004", freq="ME") + ts = Series(np.arange(len(rng)), index=rng) + + result = pivot_table(DataFrame(ts), index=ts.index.year, columns=ts.index.month) + result.columns = result.columns.droplevel(0) + + month = np.asarray(ts.index.month) + expected = {} + for y in ts.index.year.unique().values: + mask = ts.index.year == y + expected[y] = Series(ts.values[mask], index=month[mask]) + expected = DataFrame(expected, dtype=float).T + tm.assert_frame_equal(result, expected) + + def test_pivot_table_with_iterator_values(self, data): + # GH 12017 + aggs = {"D": "sum", "E": "mean"} + + pivot_values_list = pivot_table( + data, index=["A"], values=list(aggs.keys()), aggfunc=aggs + ) + + pivot_values_keys = pivot_table( + data, index=["A"], values=aggs.keys(), aggfunc=aggs + ) + tm.assert_frame_equal(pivot_values_keys, pivot_values_list) + + agg_values_gen = (value for value in aggs) + pivot_values_gen = pivot_table( + data, index=["A"], values=agg_values_gen, aggfunc=aggs + ) + tm.assert_frame_equal(pivot_values_gen, pivot_values_list) + + def test_pivot_table_margins_name_with_aggfunc_list(self): + # GH 13354 + margins_name = "Weekly" + costs = DataFrame( + { + "item": ["bacon", "cheese", "bacon", "cheese"], + "cost": [2.5, 4.5, 3.2, 3.3], + "day": ["ME", "ME", "T", "T"], + } + ) + table = costs.pivot_table( + index="item", + columns="day", + margins=True, 
+ margins_name=margins_name, + aggfunc=["mean", "max"], + ) + ix = Index(["bacon", "cheese", margins_name], name="item") + tups = [ + ("mean", "cost", "ME"), + ("mean", "cost", "T"), + ("mean", "cost", margins_name), + ("max", "cost", "ME"), + ("max", "cost", "T"), + ("max", "cost", margins_name), + ] + cols = MultiIndex.from_tuples(tups, names=[None, None, "day"]) + expected = DataFrame(table.values, index=ix, columns=cols) + tm.assert_frame_equal(table, expected) + + def test_categorical_margins(self, observed): + # GH 10989 + df = DataFrame( + {"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2} + ) + + expected = DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]]) + expected.index = Index([0, 1, "All"], name="y") + expected.columns = Index([0, 1, "All"], name="z") + + table = df.pivot_table("x", "y", "z", dropna=observed, margins=True) + tm.assert_frame_equal(table, expected) + + def test_categorical_margins_category(self, observed): + df = DataFrame( + {"x": np.arange(8), "y": np.arange(8) // 4, "z": np.arange(8) % 2} + ) + + expected = DataFrame([[1.0, 2.0, 1.5], [5, 6, 5.5], [3, 4, 3.5]]) + expected.index = Index([0, 1, "All"], name="y") + expected.columns = Index([0, 1, "All"], name="z") + + df.y = df.y.astype("category") + df.z = df.z.astype("category") + msg = "The default value of observed=False is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + table = df.pivot_table("x", "y", "z", dropna=observed, margins=True) + tm.assert_frame_equal(table, expected) + + def test_margins_casted_to_float(self): + # GH 24893 + df = DataFrame( + { + "A": [2, 4, 6, 8], + "B": [1, 4, 5, 8], + "C": [1, 3, 4, 6], + "D": ["X", "X", "Y", "Y"], + } + ) + + result = pivot_table(df, index="D", margins=True) + expected = DataFrame( + {"A": [3.0, 7.0, 5], "B": [2.5, 6.5, 4.5], "C": [2.0, 5.0, 3.5]}, + index=Index(["X", "Y", "All"], name="D"), + ) + tm.assert_frame_equal(result, expected) + + def test_pivot_with_categorical(self, observed, ordered): + # gh-21370 + idx = [np.nan, "low", "high", "low", np.nan] + col = [np.nan, "A", "B", np.nan, "A"] + df = DataFrame( + { + "In": Categorical(idx, categories=["low", "high"], ordered=ordered), + "Col": Categorical(col, categories=["A", "B"], ordered=ordered), + "Val": range(1, 6), + } + ) + # case with index/columns/value + result = df.pivot_table( + index="In", columns="Col", values="Val", observed=observed + ) + + expected_cols = pd.CategoricalIndex(["A", "B"], ordered=ordered, name="Col") + + expected = DataFrame(data=[[2.0, np.nan], [np.nan, 3.0]], columns=expected_cols) + expected.index = Index( + Categorical(["low", "high"], categories=["low", "high"], ordered=ordered), + name="In", + ) + + tm.assert_frame_equal(result, expected) + + # case with columns/value + result = df.pivot_table(columns="Col", values="Val", observed=observed) + + expected = DataFrame( + data=[[3.5, 3.0]], columns=expected_cols, index=Index(["Val"]) + ) + + tm.assert_frame_equal(result, expected) + + def test_categorical_aggfunc(self, observed): + # GH 9534 + df = DataFrame( + {"C1": ["A", "B", "C", "C"], "C2": ["a", "a", "b", "b"], "V": [1, 2, 3, 4]} + ) + df["C1"] = df["C1"].astype("category") + msg = "The default value of observed=False is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.pivot_table( + "V", index="C1", columns="C2", dropna=observed, aggfunc="count" + ) + + expected_index = pd.CategoricalIndex( + ["A", "B", "C"], categories=["A", "B", "C"], ordered=False, name="C1" + ) + 
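+ # "C1" was cast to categorical above, so the result index is a
+ # CategoricalIndex; aggfunc="count" yields int64 cell counts.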
expected_columns = Index(["a", "b"], name="C2") + expected_data = np.array([[1, 0], [1, 0], [0, 2]], dtype=np.int64) + expected = DataFrame( + expected_data, index=expected_index, columns=expected_columns + ) + tm.assert_frame_equal(result, expected) + + def test_categorical_pivot_index_ordering(self, observed): + # GH 8731 + df = DataFrame( + { + "Sales": [100, 120, 220], + "Month": ["January", "January", "January"], + "Year": [2013, 2014, 2013], + } + ) + months = [ + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + df["Month"] = df["Month"].astype("category").cat.set_categories(months) + result = df.pivot_table( + values="Sales", + index="Month", + columns="Year", + observed=observed, + aggfunc="sum", + ) + expected_columns = Index([2013, 2014], name="Year", dtype="int64") + expected_index = pd.CategoricalIndex( + months, categories=months, ordered=False, name="Month" + ) + expected_data = [[320, 120]] + [[0, 0]] * 11 + expected = DataFrame( + expected_data, index=expected_index, columns=expected_columns + ) + if observed: + expected = expected.loc[["January"]] + + tm.assert_frame_equal(result, expected) + + def test_pivot_table_not_series(self): + # GH 4386 + # pivot_table always returns a DataFrame + # when values is not list like and columns is None + # and aggfunc is not instance of list + df = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"], "col3": [1, 3, 9]}) + + result = df.pivot_table("col1", index=["col3", "col2"], aggfunc="sum") + m = MultiIndex.from_arrays([[1, 3, 9], ["C", "D", "E"]], names=["col3", "col2"]) + expected = DataFrame([3, 4, 5], index=m, columns=["col1"]) + + tm.assert_frame_equal(result, expected) + + result = df.pivot_table("col1", index="col3", columns="col2", aggfunc="sum") + expected = DataFrame( + [[3, np.nan, np.nan], [np.nan, 4, np.nan], [np.nan, np.nan, 5]], + index=Index([1, 3, 9], name="col3"), + columns=Index(["C", "D", "E"], name="col2"), + ) + + tm.assert_frame_equal(result, expected) + + result = df.pivot_table("col1", index="col3", aggfunc=["sum"]) + m = MultiIndex.from_arrays([["sum"], ["col1"]]) + expected = DataFrame([3, 4, 5], index=Index([1, 3, 9], name="col3"), columns=m) + + tm.assert_frame_equal(result, expected) + + def test_pivot_margins_name_unicode(self): + # issue #13292 + greek = "\u0394\u03bf\u03ba\u03b9\u03bc\u03ae" + frame = DataFrame({"foo": [1, 2, 3]}, columns=Index(["foo"], dtype=object)) + table = pivot_table( + frame, index=["foo"], aggfunc=len, margins=True, margins_name=greek + ) + index = Index([1, 2, 3, greek], dtype="object", name="foo") + expected = DataFrame(index=index, columns=[]) + tm.assert_frame_equal(table, expected) + + def test_pivot_string_as_func(self): + # GH #18713 + # for correctness purposes + data = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": range(11), + } + ) + + result = pivot_table(data, index="A", columns="B", aggfunc="sum") + mi = MultiIndex( + levels=[["C"], ["one", "two"]], codes=[[0, 0], [0, 1]], names=[None, "B"] + ) + expected = DataFrame( + {("C", "one"): {"bar": 15, "foo": 13}, ("C", "two"): {"bar": 7, "foo": 20}}, + columns=mi, + ).rename_axis("A") + tm.assert_frame_equal(result, expected) + + result = pivot_table(data, index="A", columns="B", aggfunc=["sum", "mean"]) + mi = 
MultiIndex( + levels=[["sum", "mean"], ["C"], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 0, 0, 0], [0, 1, 0, 1]], + names=[None, None, "B"], + ) + expected = DataFrame( + { + ("mean", "C", "one"): {"bar": 5.0, "foo": 3.25}, + ("mean", "C", "two"): {"bar": 7.0, "foo": 6.666666666666667}, + ("sum", "C", "one"): {"bar": 15, "foo": 13}, + ("sum", "C", "two"): {"bar": 7, "foo": 20}, + }, + columns=mi, + ).rename_axis("A") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "f, f_numpy", + [ + ("sum", np.sum), + ("mean", np.mean), + ("std", np.std), + (["sum", "mean"], [np.sum, np.mean]), + (["sum", "std"], [np.sum, np.std]), + (["std", "mean"], [np.std, np.mean]), + ], + ) + def test_pivot_string_func_vs_func(self, f, f_numpy, data): + # GH #18713 + # for consistency purposes + data = data.drop(columns="C") + result = pivot_table(data, index="A", columns="B", aggfunc=f) + ops = "|".join(f) if isinstance(f, list) else f + msg = f"using DataFrameGroupBy.[{ops}]" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = pivot_table(data, index="A", columns="B", aggfunc=f_numpy) + tm.assert_frame_equal(result, expected) + + @pytest.mark.slow + def test_pivot_number_of_levels_larger_than_int32(self, monkeypatch): + # GH 20601 + # GH 26314: Change ValueError to PerformanceWarning + class MockUnstacker(reshape_lib._Unstacker): + def __init__(self, *args, **kwargs) -> None: + # __init__ will raise the warning + super().__init__(*args, **kwargs) + raise Exception("Don't compute final result.") + + with monkeypatch.context() as m: + m.setattr(reshape_lib, "_Unstacker", MockUnstacker) + df = DataFrame( + {"ind1": np.arange(2**16), "ind2": np.arange(2**16), "count": 0} + ) + + msg = "The following operation may generate" + with tm.assert_produces_warning(PerformanceWarning, match=msg): + with pytest.raises(Exception, match="Don't compute final result."): + df.pivot_table( + index="ind1", columns="ind2", values="count", aggfunc="count" + ) + + def test_pivot_table_aggfunc_dropna(self, dropna): + # GH 22159 + df = DataFrame( + { + "fruit": ["apple", "peach", "apple"], + "size": [1, 1, 2], + "taste": [7, 6, 6], + } + ) + + def ret_one(x): + return 1 + + def ret_sum(x): + return sum(x) + + def ret_none(x): + return np.nan + + result = pivot_table( + df, columns="fruit", aggfunc=[ret_sum, ret_none, ret_one], dropna=dropna + ) + + data = [[3, 1, np.nan, np.nan, 1, 1], [13, 6, np.nan, np.nan, 1, 1]] + col = MultiIndex.from_product( + [["ret_sum", "ret_none", "ret_one"], ["apple", "peach"]], + names=[None, "fruit"], + ) + expected = DataFrame(data, index=["size", "taste"], columns=col) + + if dropna: + expected = expected.dropna(axis="columns") + + tm.assert_frame_equal(result, expected) + + def test_pivot_table_aggfunc_scalar_dropna(self, dropna): + # GH 22159 + df = DataFrame( + {"A": ["one", "two", "one"], "x": [3, np.nan, 2], "y": [1, np.nan, np.nan]} + ) + + result = pivot_table(df, columns="A", aggfunc="mean", dropna=dropna) + + data = [[2.5, np.nan], [1, np.nan]] + col = Index(["one", "two"], name="A") + expected = DataFrame(data, index=["x", "y"], columns=col) + + if dropna: + expected = expected.dropna(axis="columns") + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("margins", [True, False]) + def test_pivot_table_empty_aggfunc(self, margins): + # GH 9186 & GH 13483 & GH 49240 + df = DataFrame( + { + "A": [2, 2, 3, 3, 2], + "id": [5, 6, 7, 8, 9], + "C": ["p", "q", "q", "p", "q"], + "D": [None, None, None, None, None], + } + ) + result = 
df.pivot_table( + index="A", columns="D", values="id", aggfunc=np.size, margins=margins + ) + exp_cols = Index([], name="D") + expected = DataFrame(index=Index([], dtype="int64", name="A"), columns=exp_cols) + tm.assert_frame_equal(result, expected) + + def test_pivot_table_no_column_raises(self): + # GH 10326 + def agg(arr): + return np.mean(arr) + + df = DataFrame({"X": [0, 0, 1, 1], "Y": [0, 1, 0, 1], "Z": [10, 20, 30, 40]}) + with pytest.raises(KeyError, match="notpresent"): + df.pivot_table("notpresent", "X", "Y", aggfunc=agg) + + def test_pivot_table_multiindex_columns_doctest_case(self): + # The relevant characteristic is that the call + # to maybe_downcast_to_dtype(agged[v], data[v].dtype) in + # __internal_pivot_table has `agged[v]` a DataFrame instead of Series, + # In this case this is because agged.columns is a MultiIndex and 'v' + # is only indexing on its first level. + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + + table = pivot_table( + df, + values=["D", "E"], + index=["A", "C"], + aggfunc={"D": "mean", "E": ["min", "max", "mean"]}, + ) + cols = MultiIndex.from_tuples( + [("D", "mean"), ("E", "max"), ("E", "mean"), ("E", "min")] + ) + index = MultiIndex.from_tuples( + [("bar", "large"), ("bar", "small"), ("foo", "large"), ("foo", "small")], + names=["A", "C"], + ) + vals = np.array( + [ + [5.5, 9.0, 7.5, 6.0], + [5.5, 9.0, 8.5, 8.0], + [2.0, 5.0, 4.5, 4.0], + [2.33333333, 6.0, 4.33333333, 2.0], + ] + ) + expected = DataFrame(vals, columns=cols, index=index) + expected[("E", "min")] = expected[("E", "min")].astype(np.int64) + expected[("E", "max")] = expected[("E", "max")].astype(np.int64) + tm.assert_frame_equal(table, expected) + + def test_pivot_table_sort_false(self): + # GH#39143 + df = DataFrame( + { + "a": ["d1", "d4", "d3"], + "col": ["a", "b", "c"], + "num": [23, 21, 34], + "year": ["2018", "2018", "2019"], + } + ) + result = df.pivot_table( + index=["a", "col"], columns="year", values="num", aggfunc="sum", sort=False + ) + expected = DataFrame( + [[23, np.nan], [21, np.nan], [np.nan, 34]], + columns=Index(["2018", "2019"], name="year"), + index=MultiIndex.from_arrays( + [["d1", "d4", "d3"], ["a", "b", "c"]], names=["a", "col"] + ), + ) + tm.assert_frame_equal(result, expected) + + def test_pivot_table_nullable_margins(self): + # GH#48681 + df = DataFrame( + {"a": "A", "b": [1, 2], "sales": Series([10, 11], dtype="Int64")} + ) + + result = df.pivot_table(index="b", columns="a", margins=True, aggfunc="sum") + expected = DataFrame( + [[10, 10], [11, 11], [21, 21]], + index=Index([1, 2, "All"], name="b"), + columns=MultiIndex.from_tuples( + [("sales", "A"), ("sales", "All")], names=[None, "a"] + ), + dtype="Int64", + ) + tm.assert_frame_equal(result, expected) + + def test_pivot_table_sort_false_with_multiple_values(self): + df = DataFrame( + { + "firstname": ["John", "Michael"], + "lastname": ["Foo", "Bar"], + "height": [173, 182], + "age": [47, 33], + } + ) + result = df.pivot_table( + index=["lastname", "firstname"], values=["height", "age"], sort=False + ) + expected = DataFrame( + [[173.0, 47.0], [182.0, 33.0]], + columns=["height", "age"], + index=MultiIndex.from_tuples( + [("Foo", "John"), ("Bar", "Michael")], + names=["lastname", "firstname"], + ), + ) 
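+ # sort=False keeps the index tuples in first-appearance order:
+ # ("Foo", "John") stays ahead of ("Bar", "Michael").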
+ tm.assert_frame_equal(result, expected) + + def test_pivot_table_with_margins_and_numeric_columns(self): + # GH 26568 + df = DataFrame([["a", "x", 1], ["a", "y", 2], ["b", "y", 3], ["b", "z", 4]]) + df.columns = [10, 20, 30] + + result = df.pivot_table( + index=10, columns=20, values=30, aggfunc="sum", fill_value=0, margins=True + ) + + expected = DataFrame([[1, 2, 0, 3], [0, 3, 4, 7], [1, 5, 4, 10]]) + expected.columns = ["x", "y", "z", "All"] + expected.index = ["a", "b", "All"] + expected.columns.name = 20 + expected.index.name = 10 + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dropna", [True, False]) + def test_pivot_ea_dtype_dropna(self, dropna): + # GH#47477 + df = DataFrame({"x": "a", "y": "b", "age": Series([20, 40], dtype="Int64")}) + result = df.pivot_table( + index="x", columns="y", values="age", aggfunc="mean", dropna=dropna + ) + expected = DataFrame( + [[30]], + index=Index(["a"], name="x"), + columns=Index(["b"], name="y"), + dtype="Float64", + ) + tm.assert_frame_equal(result, expected) + + def test_pivot_table_datetime_warning(self): + # GH#48683 + df = DataFrame( + { + "a": "A", + "b": [1, 2], + "date": pd.Timestamp("2019-12-31"), + "sales": [10.0, 11], + } + ) + with tm.assert_produces_warning(None): + result = df.pivot_table( + index=["b", "date"], columns="a", margins=True, aggfunc="sum" + ) + expected = DataFrame( + [[10.0, 10.0], [11.0, 11.0], [21.0, 21.0]], + index=MultiIndex.from_arrays( + [ + Index([1, 2, "All"], name="b"), + Index( + [pd.Timestamp("2019-12-31"), pd.Timestamp("2019-12-31"), ""], + dtype=object, + name="date", + ), + ] + ), + columns=MultiIndex.from_tuples( + [("sales", "A"), ("sales", "All")], names=[None, "a"] + ), + ) + tm.assert_frame_equal(result, expected) + + def test_pivot_table_with_mixed_nested_tuples(self, using_array_manager): + # GH 50342 + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + ("col5",): [ + "foo", + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + ], + ("col6", 6): [ + "one", + "one", + "one", + "two", + "two", + "one", + "one", + "two", + "two", + ], + (7, "seven"): [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + } + ) + result = pivot_table( + df, values="D", index=["A", "B"], columns=[(7, "seven")], aggfunc="sum" + ) + expected = DataFrame( + [[4.0, 5.0], [7.0, 6.0], [4.0, 1.0], [np.nan, 6.0]], + columns=Index(["large", "small"], name=(7, "seven")), + index=MultiIndex.from_arrays( + [["bar", "bar", "foo", "foo"], ["one", "two"] * 2], names=["A", "B"] + ), + ) + if using_array_manager: + # INFO(ArrayManager) column without NaNs can preserve int dtype + expected["small"] = expected["small"].astype("int64") + tm.assert_frame_equal(result, expected) + + def test_pivot_table_aggfunc_nunique_with_different_values(self): + test = DataFrame( + { + "a": range(10), + "b": range(10), + "c": range(10), + "d": range(10), + } + ) + + columnval = MultiIndex.from_arrays( + [ + ["nunique" for i in range(10)], + ["c" for i in range(10)], + range(10), + ], + names=(None, None, "b"), + ) + nparr = np.full((10, 10), np.nan) + np.fill_diagonal(nparr, 1.0) + + expected = DataFrame(nparr, index=Index(range(10), name="a"), 
columns=columnval) + result = test.pivot_table( + index=[ + "a", + ], + columns=[ + "b", + ], + values=[ + "c", + ], + aggfunc=["nunique"], + ) + + tm.assert_frame_equal(result, expected) + + +class TestPivot: + def test_pivot(self): + data = { + "index": ["A", "B", "C", "C", "B", "A"], + "columns": ["One", "One", "One", "Two", "Two", "Two"], + "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], + } + + frame = DataFrame(data) + pivoted = frame.pivot(index="index", columns="columns", values="values") + + expected = DataFrame( + { + "One": {"A": 1.0, "B": 2.0, "C": 3.0}, + "Two": {"A": 1.0, "B": 2.0, "C": 3.0}, + } + ) + + expected.index.name, expected.columns.name = "index", "columns" + tm.assert_frame_equal(pivoted, expected) + + # name tracking + assert pivoted.index.name == "index" + assert pivoted.columns.name == "columns" + + # don't specify values + pivoted = frame.pivot(index="index", columns="columns") + assert pivoted.index.name == "index" + assert pivoted.columns.names == (None, "columns") + + def test_pivot_duplicates(self): + data = DataFrame( + { + "a": ["bar", "bar", "foo", "foo", "foo"], + "b": ["one", "two", "one", "one", "two"], + "c": [1.0, 2.0, 3.0, 3.0, 4.0], + } + ) + with pytest.raises(ValueError, match="duplicate entries"): + data.pivot(index="a", columns="b", values="c") + + def test_pivot_empty(self): + df = DataFrame(columns=["a", "b", "c"]) + result = df.pivot(index="a", columns="b", values="c") + expected = DataFrame(index=[], columns=[]) + tm.assert_frame_equal(result, expected, check_names=False) + + @pytest.mark.parametrize("dtype", [object, "string"]) + def test_pivot_integer_bug(self, dtype): + df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")], dtype=dtype) + + result = df.pivot(index=1, columns=0, values=2) + tm.assert_index_equal(result.columns, Index(["A", "B"], name=0, dtype=dtype)) + + def test_pivot_index_none(self): + # GH#3962 + data = { + "index": ["A", "B", "C", "C", "B", "A"], + "columns": ["One", "One", "One", "Two", "Two", "Two"], + "values": [1.0, 2.0, 3.0, 3.0, 2.0, 1.0], + } + + frame = DataFrame(data).set_index("index") + result = frame.pivot(columns="columns", values="values") + expected = DataFrame( + { + "One": {"A": 1.0, "B": 2.0, "C": 3.0}, + "Two": {"A": 1.0, "B": 2.0, "C": 3.0}, + } + ) + + expected.index.name, expected.columns.name = "index", "columns" + tm.assert_frame_equal(result, expected) + + # omit values + result = frame.pivot(columns="columns") + + expected.columns = MultiIndex.from_tuples( + [("values", "One"), ("values", "Two")], names=[None, "columns"] + ) + expected.index.name = "index" + tm.assert_frame_equal(result, expected, check_names=False) + assert result.index.name == "index" + assert result.columns.names == (None, "columns") + expected.columns = expected.columns.droplevel(0) + result = frame.pivot(columns="columns", values="values") + + expected.columns.name = "columns" + tm.assert_frame_equal(result, expected) + + def test_pivot_index_list_values_none_immutable_args(self): + # GH37635 + df = DataFrame( + { + "lev1": [1, 1, 1, 2, 2, 2], + "lev2": [1, 1, 2, 1, 1, 2], + "lev3": [1, 2, 1, 2, 1, 2], + "lev4": [1, 2, 3, 4, 5, 6], + "values": [0, 1, 2, 3, 4, 5], + } + ) + index = ["lev1", "lev2"] + columns = ["lev3"] + result = df.pivot(index=index, columns=columns) + + expected = DataFrame( + np.array( + [ + [1.0, 2.0, 0.0, 1.0], + [3.0, np.nan, 2.0, np.nan], + [5.0, 4.0, 4.0, 3.0], + [np.nan, 6.0, np.nan, 5.0], + ] + ), + index=MultiIndex.from_arrays( + [(1, 1, 2, 2), (1, 2, 1, 2)], names=["lev1", "lev2"] + ), + 
columns=MultiIndex.from_arrays( + [("lev4", "lev4", "values", "values"), (1, 2, 1, 2)], + names=[None, "lev3"], + ), + ) + + tm.assert_frame_equal(result, expected) + + assert index == ["lev1", "lev2"] + assert columns == ["lev3"] + + def test_pivot_columns_not_given(self): + # GH#48293 + df = DataFrame({"a": [1], "b": 1}) + with pytest.raises(TypeError, match="missing 1 required keyword-only argument"): + df.pivot() # pylint: disable=missing-kwoa + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="None is cast to NaN") + def test_pivot_columns_is_none(self): + # GH#48293 + df = DataFrame({None: [1], "b": 2, "c": 3}) + result = df.pivot(columns=None) + expected = DataFrame({("b", 1): [2], ("c", 1): 3}) + tm.assert_frame_equal(result, expected) + + result = df.pivot(columns=None, index="b") + expected = DataFrame({("c", 1): 3}, index=Index([2], name="b")) + tm.assert_frame_equal(result, expected) + + result = df.pivot(columns=None, index="b", values="c") + expected = DataFrame({1: 3}, index=Index([2], name="b")) + tm.assert_frame_equal(result, expected) + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="None is cast to NaN") + def test_pivot_index_is_none(self): + # GH#48293 + df = DataFrame({None: [1], "b": 2, "c": 3}) + + result = df.pivot(columns="b", index=None) + expected = DataFrame({("c", 2): 3}, index=[1]) + expected.columns.names = [None, "b"] + tm.assert_frame_equal(result, expected) + + result = df.pivot(columns="b", index=None, values="c") + expected = DataFrame(3, index=[1], columns=Index([2], name="b")) + tm.assert_frame_equal(result, expected) + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="None is cast to NaN") + def test_pivot_values_is_none(self): + # GH#48293 + df = DataFrame({None: [1], "b": 2, "c": 3}) + + result = df.pivot(columns="b", index="c", values=None) + expected = DataFrame( + 1, index=Index([3], name="c"), columns=Index([2], name="b") + ) + tm.assert_frame_equal(result, expected) + + result = df.pivot(columns="b", values=None) + expected = DataFrame(1, index=[0], columns=Index([2], name="b")) + tm.assert_frame_equal(result, expected) + + def test_pivot_not_changing_index_name(self): + # GH#52692 + df = DataFrame({"one": ["a"], "two": 0, "three": 1}) + expected = df.copy(deep=True) + df.pivot(index="one", columns="two", values="three") + tm.assert_frame_equal(df, expected) + + def test_pivot_table_empty_dataframe_correct_index(self): + # GH 21932 + df = DataFrame([], columns=["a", "b", "value"]) + pivot = df.pivot_table(index="a", columns="b", values="value", aggfunc="count") + + expected = Index([], dtype="object", name="b") + tm.assert_index_equal(pivot.columns, expected) + + def test_pivot_table_handles_explicit_datetime_types(self): + # GH#43574 + df = DataFrame( + [ + {"a": "x", "date_str": "2023-01-01", "amount": 1}, + {"a": "y", "date_str": "2023-01-02", "amount": 2}, + {"a": "z", "date_str": "2023-01-03", "amount": 3}, + ] + ) + df["date"] = pd.to_datetime(df["date_str"]) + + with tm.assert_produces_warning(False): + pivot = df.pivot_table( + index=["a", "date"], values=["amount"], aggfunc="sum", margins=True + ) + + expected = MultiIndex.from_tuples( + [ + ("x", datetime.strptime("2023-01-01 00:00:00", "%Y-%m-%d %H:%M:%S")), + ("y", datetime.strptime("2023-01-02 00:00:00", "%Y-%m-%d %H:%M:%S")), + ("z", datetime.strptime("2023-01-03 00:00:00", "%Y-%m-%d %H:%M:%S")), + ("All", ""), + ], + names=["a", "date"], + ) + tm.assert_index_equal(pivot.index, expected) + + def 
test_pivot_table_with_margins_and_numeric_column_names(self): + # GH#26568 + df = DataFrame([["a", "x", 1], ["a", "y", 2], ["b", "y", 3], ["b", "z", 4]]) + + result = df.pivot_table( + index=0, columns=1, values=2, aggfunc="sum", fill_value=0, margins=True + ) + + expected = DataFrame( + [[1, 2, 0, 3], [0, 3, 4, 7], [1, 5, 4, 10]], + columns=Index(["x", "y", "z", "All"], name=1), + index=Index(["a", "b", "All"], name=0), + ) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_union_categoricals.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_union_categoricals.py new file mode 100644 index 0000000000000000000000000000000000000000..8d78d34e936f0eba01bcd2b2a1135271f7e20918 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_union_categoricals.py @@ -0,0 +1,365 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.concat import union_categoricals + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + Series, +) +import pandas._testing as tm + + +class TestUnionCategoricals: + @pytest.mark.parametrize( + "a, b, combined", + [ + (list("abc"), list("abd"), list("abcabd")), + ([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]), + ([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]), + ( + ["b", "b", np.nan, "a"], + ["a", np.nan, "c"], + ["b", "b", np.nan, "a", "a", np.nan, "c"], + ), + ( + pd.date_range("2014-01-01", "2014-01-05"), + pd.date_range("2014-01-06", "2014-01-07"), + pd.date_range("2014-01-01", "2014-01-07"), + ), + ( + pd.date_range("2014-01-01", "2014-01-05", tz="US/Central"), + pd.date_range("2014-01-06", "2014-01-07", tz="US/Central"), + pd.date_range("2014-01-01", "2014-01-07", tz="US/Central"), + ), + ( + pd.period_range("2014-01-01", "2014-01-05"), + pd.period_range("2014-01-06", "2014-01-07"), + pd.period_range("2014-01-01", "2014-01-07"), + ), + ], + ) + @pytest.mark.parametrize("box", [Categorical, CategoricalIndex, Series]) + def test_union_categorical(self, a, b, combined, box): + # GH 13361 + result = union_categoricals([box(Categorical(a)), box(Categorical(b))]) + expected = Categorical(combined) + tm.assert_categorical_equal(result, expected) + + def test_union_categorical_ordered_appearance(self): + # new categories ordered by appearance + s = Categorical(["x", "y", "z"]) + s2 = Categorical(["a", "b", "c"]) + result = union_categoricals([s, s2]) + expected = Categorical( + ["x", "y", "z", "a", "b", "c"], categories=["x", "y", "z", "a", "b", "c"] + ) + tm.assert_categorical_equal(result, expected) + + def test_union_categorical_ordered_true(self): + s = Categorical([0, 1.2, 2], ordered=True) + s2 = Categorical([0, 1.2, 2], ordered=True) + result = union_categoricals([s, s2]) + expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True) + tm.assert_categorical_equal(result, expected) + + def test_union_categorical_match_types(self): + # must exactly match types + s = Categorical([0, 1.2, 2]) + s2 = Categorical([2, 3, 4]) + msg = "dtype of categories must be the same" + with pytest.raises(TypeError, match=msg): + union_categoricals([s, s2]) + + def test_union_categorical_empty(self): + msg = "No Categoricals to union" + with pytest.raises(ValueError, match=msg): + union_categoricals([]) + + def test_union_categoricals_nan(self): + # GH 13759 + res = union_categoricals( + [Categorical([1, 2, np.nan]), Categorical([3, 2, np.nan])] + ) + exp = Categorical([1, 2, np.nan, 3, 2, np.nan]) + tm.assert_categorical_equal(res, 
exp) + + res = union_categoricals( + [Categorical(["A", "B"]), Categorical(["B", "B", np.nan])] + ) + exp = Categorical(["A", "B", "B", "B", np.nan]) + tm.assert_categorical_equal(res, exp) + + val1 = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-03-01"), pd.NaT] + val2 = [pd.NaT, pd.Timestamp("2011-01-01"), pd.Timestamp("2011-02-01")] + + res = union_categoricals([Categorical(val1), Categorical(val2)]) + exp = Categorical( + val1 + val2, + categories=[ + pd.Timestamp("2011-01-01"), + pd.Timestamp("2011-03-01"), + pd.Timestamp("2011-02-01"), + ], + ) + tm.assert_categorical_equal(res, exp) + + # all NaN + res = union_categoricals( + [ + Categorical(np.array([np.nan, np.nan], dtype=object)), + Categorical(["X"], categories=pd.Index(["X"], dtype=object)), + ] + ) + exp = Categorical([np.nan, np.nan, "X"]) + tm.assert_categorical_equal(res, exp) + + res = union_categoricals( + [Categorical([np.nan, np.nan]), Categorical([np.nan, np.nan])] + ) + exp = Categorical([np.nan, np.nan, np.nan, np.nan]) + tm.assert_categorical_equal(res, exp) + + @pytest.mark.parametrize("val", [[], ["1"]]) + def test_union_categoricals_empty(self, val, request, using_infer_string): + # GH 13759 + if using_infer_string and val == ["1"]: + request.applymarker(pytest.mark.xfail(reason="object and strings don't match")) + res = union_categoricals([Categorical([]), Categorical(val)]) + exp = Categorical(val) + tm.assert_categorical_equal(res, exp) + + def test_union_categorical_same_category(self): + # check fastpath + c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4]) + c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4]) + res = union_categoricals([c1, c2]) + exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan], categories=[1, 2, 3, 4]) + tm.assert_categorical_equal(res, exp) + + def test_union_categorical_same_category_str(self): + c1 = Categorical(["z", "z", "z"], categories=["x", "y", "z"]) + c2 = Categorical(["x", "x", "x"], categories=["x", "y", "z"]) + res = union_categoricals([c1, c2]) + exp = Categorical(["z", "z", "z", "x", "x", "x"], categories=["x", "y", "z"]) + tm.assert_categorical_equal(res, exp) + + def test_union_categorical_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19096 + c1 = Categorical(["a", "b", "c"], categories=["a", "b", "c"]) + c2 = Categorical(["a", "b", "c"], categories=["b", "a", "c"]) + result = union_categoricals([c1, c2]) + expected = Categorical( + ["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"] + ) + tm.assert_categorical_equal(result, expected) + + def test_union_categoricals_ordered(self): + c1 = Categorical([1, 2, 3], ordered=True) + c2 = Categorical([1, 2, 3], ordered=False) + + msg = "Categorical.ordered must be the same" + with pytest.raises(TypeError, match=msg): + union_categoricals([c1, c2]) + + res = union_categoricals([c1, c1]) + exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True) + tm.assert_categorical_equal(res, exp) + + c1 = Categorical([1, 2, 3, np.nan], ordered=True) + c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True) + + res = union_categoricals([c1, c2]) + exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True) + tm.assert_categorical_equal(res, exp) + + c1 = Categorical([1, 2, 3], ordered=True) + c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True) + + msg = "to union ordered Categoricals, all categories must be the same" + with pytest.raises(TypeError, match=msg): + union_categoricals([c1, c2]) + + def test_union_categoricals_ignore_order(self): + # GH 15219 + c1 = Categorical([1, 2, 
3], ordered=True) + c2 = Categorical([1, 2, 3], ordered=False) + + res = union_categoricals([c1, c2], ignore_order=True) + exp = Categorical([1, 2, 3, 1, 2, 3]) + tm.assert_categorical_equal(res, exp) + + msg = "Categorical.ordered must be the same" + with pytest.raises(TypeError, match=msg): + union_categoricals([c1, c2], ignore_order=False) + + res = union_categoricals([c1, c1], ignore_order=True) + exp = Categorical([1, 2, 3, 1, 2, 3]) + tm.assert_categorical_equal(res, exp) + + res = union_categoricals([c1, c1], ignore_order=False) + exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True) + tm.assert_categorical_equal(res, exp) + + c1 = Categorical([1, 2, 3, np.nan], ordered=True) + c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True) + + res = union_categoricals([c1, c2], ignore_order=True) + exp = Categorical([1, 2, 3, np.nan, 3, 2]) + tm.assert_categorical_equal(res, exp) + + c1 = Categorical([1, 2, 3], ordered=True) + c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True) + + res = union_categoricals([c1, c2], ignore_order=True) + exp = Categorical([1, 2, 3, 1, 2, 3]) + tm.assert_categorical_equal(res, exp) + + res = union_categoricals([c2, c1], ignore_order=True, sort_categories=True) + exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3]) + tm.assert_categorical_equal(res, exp) + + c1 = Categorical([1, 2, 3], ordered=True) + c2 = Categorical([4, 5, 6], ordered=True) + result = union_categoricals([c1, c2], ignore_order=True) + expected = Categorical([1, 2, 3, 4, 5, 6]) + tm.assert_categorical_equal(result, expected) + + msg = "to union ordered Categoricals, all categories must be the same" + with pytest.raises(TypeError, match=msg): + union_categoricals([c1, c2], ignore_order=False) + + with pytest.raises(TypeError, match=msg): + union_categoricals([c1, c2]) + + def test_union_categoricals_sort(self): + # GH 13846 + c1 = Categorical(["x", "y", "z"]) + c2 = Categorical(["a", "b", "c"]) + result = union_categoricals([c1, c2], sort_categories=True) + expected = Categorical( + ["x", "y", "z", "a", "b", "c"], categories=["a", "b", "c", "x", "y", "z"] + ) + tm.assert_categorical_equal(result, expected) + + # fastpath + c1 = Categorical(["a", "b"], categories=["b", "a", "c"]) + c2 = Categorical(["b", "c"], categories=["b", "a", "c"]) + result = union_categoricals([c1, c2], sort_categories=True) + expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"]) + tm.assert_categorical_equal(result, expected) + + c1 = Categorical(["a", "b"], categories=["c", "a", "b"]) + c2 = Categorical(["b", "c"], categories=["c", "a", "b"]) + result = union_categoricals([c1, c2], sort_categories=True) + expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"]) + tm.assert_categorical_equal(result, expected) + + # fastpath - skip resort + c1 = Categorical(["a", "b"], categories=["a", "b", "c"]) + c2 = Categorical(["b", "c"], categories=["a", "b", "c"]) + result = union_categoricals([c1, c2], sort_categories=True) + expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"]) + tm.assert_categorical_equal(result, expected) + + c1 = Categorical(["x", np.nan]) + c2 = Categorical([np.nan, "b"]) + result = union_categoricals([c1, c2], sort_categories=True) + expected = Categorical(["x", np.nan, np.nan, "b"], categories=["b", "x"]) + tm.assert_categorical_equal(result, expected) + + c1 = Categorical([np.nan]) + c2 = Categorical([np.nan]) + result = union_categoricals([c1, c2], sort_categories=True) + expected = Categorical([np.nan, 
np.nan]) + tm.assert_categorical_equal(result, expected) + + c1 = Categorical([]) + c2 = Categorical([]) + result = union_categoricals([c1, c2], sort_categories=True) + expected = Categorical([]) + tm.assert_categorical_equal(result, expected) + + c1 = Categorical(["b", "a"], categories=["b", "a", "c"], ordered=True) + c2 = Categorical(["a", "c"], categories=["b", "a", "c"], ordered=True) + msg = "Cannot use sort_categories=True with ordered Categoricals" + with pytest.raises(TypeError, match=msg): + union_categoricals([c1, c2], sort_categories=True) + + def test_union_categoricals_sort_false(self): + # GH 13846 + c1 = Categorical(["x", "y", "z"]) + c2 = Categorical(["a", "b", "c"]) + result = union_categoricals([c1, c2], sort_categories=False) + expected = Categorical( + ["x", "y", "z", "a", "b", "c"], categories=["x", "y", "z", "a", "b", "c"] + ) + tm.assert_categorical_equal(result, expected) + + def test_union_categoricals_sort_false_fastpath(self): + # fastpath + c1 = Categorical(["a", "b"], categories=["b", "a", "c"]) + c2 = Categorical(["b", "c"], categories=["b", "a", "c"]) + result = union_categoricals([c1, c2], sort_categories=False) + expected = Categorical(["a", "b", "b", "c"], categories=["b", "a", "c"]) + tm.assert_categorical_equal(result, expected) + + def test_union_categoricals_sort_false_skipresort(self): + # fastpath - skip resort + c1 = Categorical(["a", "b"], categories=["a", "b", "c"]) + c2 = Categorical(["b", "c"], categories=["a", "b", "c"]) + result = union_categoricals([c1, c2], sort_categories=False) + expected = Categorical(["a", "b", "b", "c"], categories=["a", "b", "c"]) + tm.assert_categorical_equal(result, expected) + + def test_union_categoricals_sort_false_one_nan(self): + c1 = Categorical(["x", np.nan]) + c2 = Categorical([np.nan, "b"]) + result = union_categoricals([c1, c2], sort_categories=False) + expected = Categorical(["x", np.nan, np.nan, "b"], categories=["x", "b"]) + tm.assert_categorical_equal(result, expected) + + def test_union_categoricals_sort_false_only_nan(self): + c1 = Categorical([np.nan]) + c2 = Categorical([np.nan]) + result = union_categoricals([c1, c2], sort_categories=False) + expected = Categorical([np.nan, np.nan]) + tm.assert_categorical_equal(result, expected) + + def test_union_categoricals_sort_false_empty(self): + c1 = Categorical([]) + c2 = Categorical([]) + result = union_categoricals([c1, c2], sort_categories=False) + expected = Categorical([]) + tm.assert_categorical_equal(result, expected) + + def test_union_categoricals_sort_false_ordered_true(self): + c1 = Categorical(["b", "a"], categories=["b", "a", "c"], ordered=True) + c2 = Categorical(["a", "c"], categories=["b", "a", "c"], ordered=True) + result = union_categoricals([c1, c2], sort_categories=False) + expected = Categorical( + ["b", "a", "a", "c"], categories=["b", "a", "c"], ordered=True + ) + tm.assert_categorical_equal(result, expected) + + def test_union_categorical_unwrap(self): + # GH 14173 + c1 = Categorical(["a", "b"]) + c2 = Series(["b", "c"], dtype="category") + result = union_categoricals([c1, c2]) + expected = Categorical(["a", "b", "b", "c"]) + tm.assert_categorical_equal(result, expected) + + c2 = CategoricalIndex(c2) + result = union_categoricals([c1, c2]) + tm.assert_categorical_equal(result, expected) + + c1 = Series(c1) + result = union_categoricals([c1, c2]) + tm.assert_categorical_equal(result, expected) + + msg = "all components to combine must be Categorical" + with pytest.raises(TypeError, match=msg): + union_categoricals([c1, ["a", "b", 
"c"]]) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_util.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..4d0be7464cb3d97697323faef5b4e7cd0d9b6df0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/reshape/test_util.py @@ -0,0 +1,79 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + date_range, +) +import pandas._testing as tm +from pandas.core.reshape.util import cartesian_product + + +class TestCartesianProduct: + def test_simple(self): + x, y = list("ABC"), [1, 22] + result1, result2 = cartesian_product([x, y]) + expected1 = np.array(["A", "A", "B", "B", "C", "C"]) + expected2 = np.array([1, 22, 1, 22, 1, 22]) + tm.assert_numpy_array_equal(result1, expected1) + tm.assert_numpy_array_equal(result2, expected2) + + def test_datetimeindex(self): + # regression test for GitHub issue #6439 + # make sure that the ordering on datetimeindex is consistent + x = date_range("2000-01-01", periods=2) + result1, result2 = (Index(y).day for y in cartesian_product([x, x])) + expected1 = Index([1, 1, 2, 2], dtype=np.int32) + expected2 = Index([1, 2, 1, 2], dtype=np.int32) + tm.assert_index_equal(result1, expected1) + tm.assert_index_equal(result2, expected2) + + def test_tzaware_retained(self): + x = date_range("2000-01-01", periods=2, tz="US/Pacific") + y = np.array([3, 4]) + result1, result2 = cartesian_product([x, y]) + + expected = x.repeat(2) + tm.assert_index_equal(result1, expected) + + def test_tzaware_retained_categorical(self): + x = date_range("2000-01-01", periods=2, tz="US/Pacific").astype("category") + y = np.array([3, 4]) + result1, result2 = cartesian_product([x, y]) + + expected = x.repeat(2) + tm.assert_index_equal(result1, expected) + + @pytest.mark.parametrize("x, y", [[[], []], [[0, 1], []], [[], ["a", "b", "c"]]]) + def test_empty(self, x, y): + # product of empty factors + expected1 = np.array([], dtype=np.asarray(x).dtype) + expected2 = np.array([], dtype=np.asarray(y).dtype) + result1, result2 = cartesian_product([x, y]) + tm.assert_numpy_array_equal(result1, expected1) + tm.assert_numpy_array_equal(result2, expected2) + + def test_empty_input(self): + # empty product (empty input): + result = cartesian_product([]) + expected = [] + assert result == expected + + @pytest.mark.parametrize( + "X", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]] + ) + def test_invalid_input(self, X): + msg = "Input must be a list-like of list-likes" + + with pytest.raises(TypeError, match=msg): + cartesian_product(X=X) + + def test_exceed_product_space(self): + # GH31355: raise useful error when produce space is too large + msg = "Product space too large to allocate arrays!" + + with pytest.raises(ValueError, match=msg): + dims = [np.arange(0, 22, dtype=np.int16) for i in range(12)] + [ + (np.arange(15128, dtype=np.int16)), + ] + cartesian_product(X=dims)