diff --git a/ckpts/universal/global_step80/zero/19.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/19.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..67f4ed243b23300ba055c0874338f779004eab7d --- /dev/null +++ b/ckpts/universal/global_step80/zero/19.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3054bb0f6cc611c8e789cea7a531c3d26a4ef20b7eddff1c1852cdd70dbd3624 +size 50332843 diff --git a/ckpts/universal/global_step80/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..2933bfc27b9676f59e9ebd73f2a5cb9b2736bd55 --- /dev/null +++ b/ckpts/universal/global_step80/zero/23.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c7f89f06aa4b00da404b2fd7100549ab29235ccb8ed82cea1b1efd1dce825c7 +size 33555627 diff --git a/ckpts/universal/global_step80/zero/23.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step80/zero/23.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..74f62c7d9bf10de3d0cddcea44e37f560d85f0bf --- /dev/null +++ b/ckpts/universal/global_step80/zero/23.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f44ea73b7083e5f96a62955209a56ad3b9d61aba008a852f38216c74d7e2c816 +size 33555533 diff --git a/ckpts/universal/global_step80/zero/4.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/4.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..057a724b44629f035b7b00d6f131ec25a103fdeb --- /dev/null +++ b/ckpts/universal/global_step80/zero/4.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a518397adb3b70de449e6e59e242887e34da4fd749b4447c33da246f803128e +size 9372 diff --git a/ckpts/universal/global_step80/zero/4.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/4.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..0d67aeaf794e082fcb4dc41c4622445867b6fd67 --- /dev/null +++ b/ckpts/universal/global_step80/zero/4.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9e2f12c01c745c70d5e7824fcbe05bd8aae6945f3c83d225f9522af3d69ea2a +size 9387 diff --git a/ckpts/universal/global_step80/zero/4.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step80/zero/4.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..3c8fbae213ddc9d08ba4f2700e84ca3054932f52 --- /dev/null +++ b/ckpts/universal/global_step80/zero/4.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:482f8114839d6a7ea6bf930652c53bc3d9a2ce97b869af3f1716727b585ec103 +size 9293 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/conftest.py 
b/venv/lib/python3.10/site-packages/pandas/tests/indexes/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..bfb7acdcf481273e50c18540c141017deb52e094 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/conftest.py @@ -0,0 +1,41 @@ +import numpy as np +import pytest + +from pandas import ( + Series, + array, +) + + +@pytest.fixture(params=[None, False]) +def sort(request): + """ + Valid values for the 'sort' parameter used in the Index + setops methods (intersection, union, etc.) + + Caution: + Don't confuse this one with the "sort" fixture used + for DataFrame.append or concat. That one has + parameters [True, False]. + + We can't combine them as sort=True is not permitted + in the Index setops methods. + """ + return request.param + + +@pytest.fixture(params=["D", "3D", "-3D", "h", "2h", "-2h", "min", "2min", "s", "-3s"]) +def freq_sample(request): + """ + Valid values for 'freq' parameter used to create date_range and + timedelta_range. + """ + return request.param + + +@pytest.fixture(params=[list, tuple, np.array, array, Series]) +def listlike_box(request): + """ + Types that may be passed as the indexer to searchsorted. + """ + return request.param diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_numeric.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_numeric.py new file mode 100644 index 0000000000000000000000000000000000000000..4fd807e1827ddc4faf900f15dcefa18c08d4cd0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/numeric/test_numeric.py @@ -0,0 +1,553 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + Series, +) +import pandas._testing as tm + + +class TestFloatNumericIndex: + @pytest.fixture(params=[np.float64, np.float32]) + def dtype(self, request): + return request.param + + @pytest.fixture + def simple_index(self, dtype): + values = np.arange(5, dtype=dtype) + return Index(values) + + @pytest.fixture( + params=[ + [1.5, 2, 3, 4, 5], + [0.0, 2.5, 5.0, 7.5, 10.0], + [5, 4, 3, 2, 1.5], + [10.0, 7.5, 5.0, 2.5, 0.0], + ], + ids=["mixed", "float", "mixed_dec", "float_dec"], + ) + def index(self, request, dtype): + return Index(request.param, dtype=dtype) + + @pytest.fixture + def mixed_index(self, dtype): + return Index([1.5, 2, 3, 4, 5], dtype=dtype) + + @pytest.fixture + def float_index(self, dtype): + return Index([0.0, 2.5, 5.0, 7.5, 10.0], dtype=dtype) + + def test_repr_roundtrip(self, index): + tm.assert_index_equal(eval(repr(index)), index, exact=True) + + def check_coerce(self, a, b, is_float_index=True): + assert a.equals(b) + tm.assert_index_equal(a, b, exact=False) + if is_float_index: + assert isinstance(b, Index) + else: + assert type(b) is Index + + def test_constructor_from_list_no_dtype(self): + index = Index([1.5, 2.5, 3.5]) + assert index.dtype == np.float64 + + def test_constructor(self, dtype): + index_cls = Index + + # explicit construction + index = index_cls([1, 2, 3, 4, 5], dtype=dtype) + + assert isinstance(index, index_cls) + assert index.dtype == dtype + + expected = np.array([1, 2, 3, 4, 5], dtype=dtype) + tm.assert_numpy_array_equal(index.values, expected) + + index = index_cls(np.array([1, 2, 3, 4, 5]), dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + assert
isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + index = index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + assert isinstance(index, index_cls) + assert index.dtype == dtype + + # nan handling + result = index_cls([np.nan, np.nan], dtype=dtype) + assert pd.isna(result.values).all() + + result = index_cls(np.array([np.nan]), dtype=dtype) + assert pd.isna(result.values).all() + + def test_constructor_invalid(self): + index_cls = Index + cls_name = index_cls.__name__ + # invalid + msg = ( + rf"{cls_name}\(\.\.\.\) must be called with a collection of " + r"some kind, 0\.0 was passed" + ) + with pytest.raises(TypeError, match=msg): + index_cls(0.0) + + def test_constructor_coerce(self, mixed_index, float_index): + self.check_coerce(mixed_index, Index([1.5, 2, 3, 4, 5])) + self.check_coerce(float_index, Index(np.arange(5) * 2.5)) + + result = Index(np.array(np.arange(5) * 2.5, dtype=object)) + assert result.dtype == object # as of 2.0 to match Series + self.check_coerce(float_index, result.astype("float64")) + + def test_constructor_explicit(self, mixed_index, float_index): + # these don't auto convert + self.check_coerce( + float_index, Index((np.arange(5) * 2.5), dtype=object), is_float_index=False + ) + self.check_coerce( + mixed_index, Index([1.5, 2, 3, 4, 5], dtype=object), is_float_index=False + ) + + def test_type_coercion_fail(self, any_int_numpy_dtype): + # see gh-15832 + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + Index([1, 2, 3.5], dtype=any_int_numpy_dtype) + + def test_equals_numeric(self): + index_cls = Index + + idx = index_cls([1.0, 2.0]) + assert idx.equals(idx) + assert idx.identical(idx) + + idx2 = index_cls([1.0, 2.0]) + assert idx.equals(idx2) + + idx = index_cls([1.0, np.nan]) + assert idx.equals(idx) + assert idx.identical(idx) + + idx2 = index_cls([1.0, np.nan]) + assert idx.equals(idx2) + + @pytest.mark.parametrize( + "other", + ( + Index([1, 2], dtype=np.int64), + Index([1.0, 2.0], dtype=object), + Index([1, 2], dtype=object), + ), + ) + def test_equals_numeric_other_index_type(self, other): + idx = Index([1.0, 2.0]) + assert idx.equals(other) + assert other.equals(idx) + + @pytest.mark.parametrize( + "vals", + [ + pd.date_range("2016-01-01", periods=3), + pd.timedelta_range("1 Day", periods=3), + ], + ) + def test_lookups_datetimelike_values(self, vals, dtype): + # If we have datetime64 or timedelta64 values, make sure they are + # wrapped correctly GH#31163 + ser = Series(vals, index=range(3, 6)) + ser.index = ser.index.astype(dtype) + + expected = vals[1] + + result = ser[4.0] + assert isinstance(result, type(expected)) and result == expected + result = ser[4] + assert isinstance(result, type(expected)) and result == expected + + result = ser.loc[4.0] + assert isinstance(result, type(expected)) and result == expected + result = ser.loc[4] + assert isinstance(result, type(expected)) and result == expected + + result = ser.at[4.0] + assert isinstance(result, type(expected)) and result == expected + # GH#31329 .at[4] should cast to 4.0, matching .loc behavior + result = ser.at[4] + assert isinstance(result, type(expected)) and result == expected + + result = ser.iloc[1] + assert isinstance(result, type(expected)) and result == expected + + result = ser.iat[1] + assert isinstance(result, type(expected)) and result == expected + + def test_doesnt_contain_all_the_things(self): + idx = 
Index([np.nan]) + assert not idx.isin([0]).item() + assert not idx.isin([1]).item() + assert idx.isin([np.nan]).item() + + def test_nan_multiple_containment(self): + index_cls = Index + + idx = index_cls([1.0, np.nan]) + tm.assert_numpy_array_equal(idx.isin([1.0]), np.array([True, False])) + tm.assert_numpy_array_equal(idx.isin([2.0, np.pi]), np.array([False, False])) + tm.assert_numpy_array_equal(idx.isin([np.nan]), np.array([False, True])) + tm.assert_numpy_array_equal(idx.isin([1.0, np.nan]), np.array([True, True])) + idx = index_cls([1.0, 2.0]) + tm.assert_numpy_array_equal(idx.isin([np.nan]), np.array([False, False])) + + def test_fillna_float64(self): + index_cls = Index + # GH 11343 + idx = Index([1.0, np.nan, 3.0], dtype=float, name="x") + # can't downcast + exp = Index([1.0, 0.1, 3.0], name="x") + tm.assert_index_equal(idx.fillna(0.1), exp, exact=True) + + # downcast + exp = index_cls([1.0, 2.0, 3.0], name="x") + tm.assert_index_equal(idx.fillna(2), exp) + + # object + exp = Index([1.0, "obj", 3.0], name="x") + tm.assert_index_equal(idx.fillna("obj"), exp, exact=True) + + def test_logical_compat(self, simple_index): + idx = simple_index + assert idx.all() == idx.values.all() + assert idx.any() == idx.values.any() + + assert idx.all() == idx.to_series().all() + assert idx.any() == idx.to_series().any() + + +class TestNumericInt: + @pytest.fixture(params=[np.int64, np.int32, np.int16, np.int8, np.uint64]) + def dtype(self, request): + return request.param + + @pytest.fixture + def simple_index(self, dtype): + return Index(range(0, 20, 2), dtype=dtype) + + def test_is_monotonic(self): + index_cls = Index + + index = index_cls([1, 2, 3, 4]) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index._is_strictly_monotonic_increasing is True + assert index.is_monotonic_decreasing is False + assert index._is_strictly_monotonic_decreasing is False + + index = index_cls([4, 3, 2, 1]) + assert index.is_monotonic_increasing is False + assert index._is_strictly_monotonic_increasing is False + assert index._is_strictly_monotonic_decreasing is True + + index = index_cls([1]) + assert index.is_monotonic_increasing is True + assert index.is_monotonic_increasing is True + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_increasing is True + assert index._is_strictly_monotonic_decreasing is True + + def test_is_strictly_monotonic(self): + index_cls = Index + + index = index_cls([1, 1, 2, 3]) + assert index.is_monotonic_increasing is True + assert index._is_strictly_monotonic_increasing is False + + index = index_cls([3, 2, 1, 1]) + assert index.is_monotonic_decreasing is True + assert index._is_strictly_monotonic_decreasing is False + + index = index_cls([1, 1]) + assert index.is_monotonic_increasing + assert index.is_monotonic_decreasing + assert not index._is_strictly_monotonic_increasing + assert not index._is_strictly_monotonic_decreasing + + def test_logical_compat(self, simple_index): + idx = simple_index + assert idx.all() == idx.values.all() + assert idx.any() == idx.values.any() + + def test_identical(self, simple_index, dtype): + index = simple_index + + idx = Index(index.copy()) + assert idx.identical(index) + + same_values_different_type = Index(idx, dtype=object) + assert not idx.identical(same_values_different_type) + + idx = index.astype(dtype=object) + idx = idx.rename("foo") + same_values = Index(idx, dtype=object) + assert same_values.identical(idx) + + assert not idx.identical(index) + assert 
Index(same_values, name="foo", dtype=object).identical(idx) + + assert not index.astype(dtype=object).identical(index.astype(dtype=dtype)) + + def test_cant_or_shouldnt_cast(self, dtype): + msg = r"invalid literal for int\(\) with base 10: 'foo'" + + # can't + data = ["foo", "bar", "baz"] + with pytest.raises(ValueError, match=msg): + Index(data, dtype=dtype) + + def test_view_index(self, simple_index): + index = simple_index + msg = "Passing a type in .*Index.view is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + index.view(Index) + + def test_prevent_casting(self, simple_index): + index = simple_index + result = index.astype("O") + assert result.dtype == np.object_ + + +class TestIntNumericIndex: + @pytest.fixture(params=[np.int64, np.int32, np.int16, np.int8]) + def dtype(self, request): + return request.param + + def test_constructor_from_list_no_dtype(self): + index = Index([1, 2, 3]) + assert index.dtype == np.int64 + + def test_constructor(self, dtype): + index_cls = Index + + # scalar raise Exception + msg = ( + rf"{index_cls.__name__}\(\.\.\.\) must be called with a collection of some " + "kind, 5 was passed" + ) + with pytest.raises(TypeError, match=msg): + index_cls(5) + + # copy + # pass list, coerce fine + index = index_cls([-5, 0, 1, 2], dtype=dtype) + arr = index.values.copy() + new_index = index_cls(arr, copy=True) + tm.assert_index_equal(new_index, index, exact=True) + val = int(arr[0]) + 3000 + + # this should not change index + if dtype != np.int8: + # NEP 50 won't allow assignment that would overflow + arr[0] = val + assert new_index[0] != val + + if dtype == np.int64: + # pass list, coerce fine + index = index_cls([-5, 0, 1, 2], dtype=dtype) + expected = Index([-5, 0, 1, 2], dtype=dtype) + tm.assert_index_equal(index, expected) + + # from iterable + index = index_cls(iter([-5, 0, 1, 2]), dtype=dtype) + expected = index_cls([-5, 0, 1, 2], dtype=dtype) + tm.assert_index_equal(index, expected, exact=True) + + # interpret list-like + expected = index_cls([5, 0], dtype=dtype) + for cls in [Index, index_cls]: + for idx in [ + cls([5, 0], dtype=dtype), + cls(np.array([5, 0]), dtype=dtype), + cls(Series([5, 0]), dtype=dtype), + ]: + tm.assert_index_equal(idx, expected) + + def test_constructor_corner(self, dtype): + index_cls = Index + + arr = np.array([1, 2, 3, 4], dtype=object) + + index = index_cls(arr, dtype=dtype) + assert index.values.dtype == index.dtype + if dtype == np.int64: + without_dtype = Index(arr) + # as of 2.0 we do not infer a dtype when we get an object-dtype + # ndarray of numbers, matching Series behavior + assert without_dtype.dtype == object + + tm.assert_index_equal(index, without_dtype.astype(np.int64)) + + # preventing casting + arr = np.array([1, "2", 3, "4"], dtype=object) + msg = "Trying to coerce float values to integers" + with pytest.raises(ValueError, match=msg): + index_cls(arr, dtype=dtype) + + def test_constructor_coercion_signed_to_unsigned( + self, + any_unsigned_int_numpy_dtype, + ): + # see gh-15832 + msg = "|".join( + [ + "Trying to coerce negative values to unsigned integers", + "The elements provided in the data cannot all be casted", + ] + ) + with pytest.raises(OverflowError, match=msg): + Index([-1], dtype=any_unsigned_int_numpy_dtype) + + def test_constructor_np_signed(self, any_signed_int_numpy_dtype): + # GH#47475 + scalar = np.dtype(any_signed_int_numpy_dtype).type(1) + result = Index([scalar]) + expected = Index([1], dtype=any_signed_int_numpy_dtype) + tm.assert_index_equal(result, expected, 
exact=True) + + def test_constructor_np_unsigned(self, any_unsigned_int_numpy_dtype): + # GH#47475 + scalar = np.dtype(any_unsigned_int_numpy_dtype).type(1) + result = Index([scalar]) + expected = Index([1], dtype=any_unsigned_int_numpy_dtype) + tm.assert_index_equal(result, expected, exact=True) + + def test_coerce_list(self): + # coerce things + arr = Index([1, 2, 3, 4]) + assert isinstance(arr, Index) + + # but not if explicit dtype passed + arr = Index([1, 2, 3, 4], dtype=object) + assert type(arr) is Index + + +class TestFloat16Index: + # float 16 indexes not supported + # GH 49535 + def test_constructor(self): + index_cls = Index + dtype = np.float16 + + msg = "float16 indexes are not supported" + + # explicit construction + with pytest.raises(NotImplementedError, match=msg): + index_cls([1, 2, 3, 4, 5], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([1, 2, 3, 4, 5]), dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls([1.0, 2, 3, 4, 5], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([1.0, 2, 3, 4, 5]), dtype=dtype) + + # nan handling + with pytest.raises(NotImplementedError, match=msg): + index_cls([np.nan, np.nan], dtype=dtype) + + with pytest.raises(NotImplementedError, match=msg): + index_cls(np.array([np.nan]), dtype=dtype) + + +@pytest.mark.parametrize( + "box", + [list, lambda x: np.array(x, dtype=object), lambda x: Index(x, dtype=object)], +) +def test_uint_index_does_not_convert_to_float64(box): + # https://github.com/pandas-dev/pandas/issues/28279 + # https://github.com/pandas-dev/pandas/issues/28023 + series = Series( + [0, 1, 2, 3, 4, 5], + index=[ + 7606741985629028552, + 17876870360202815256, + 17876870360202815256, + 13106359306506049338, + 8991270399732411471, + 8991270399732411472, + ], + ) + + result = series.loc[box([7606741985629028552, 17876870360202815256])] + + expected = Index( + [7606741985629028552, 17876870360202815256, 17876870360202815256], + dtype="uint64", + ) + tm.assert_index_equal(result.index, expected) + + tm.assert_equal(result, series.iloc[:3]) + + +def test_float64_index_equals(): + # https://github.com/pandas-dev/pandas/issues/35217 + float_index = Index([1.0, 2, 3]) + string_index = Index(["1", "2", "3"]) + + result = float_index.equals(string_index) + assert result is False + + result = string_index.equals(float_index) + assert result is False + + +def test_map_dtype_inference_unsigned_to_signed(): + # GH#44609 cases where we don't retain dtype + idx = Index([1, 2, 3], dtype=np.uint64) + result = idx.map(lambda x: -x) + expected = Index([-1, -2, -3], dtype=np.int64) + tm.assert_index_equal(result, expected) + + +def test_map_dtype_inference_overflows(): + # GH#44609 case where we have to upcast + idx = Index(np.array([1, 2, 3], dtype=np.int8)) + result = idx.map(lambda x: x * 1000) + # TODO: we could plausibly try to infer down to int16 here + expected = Index([1000, 2000, 3000], dtype=np.int64) + tm.assert_index_equal(result, expected) + + +def test_view_to_datetimelike(): + # GH#55710 + idx = Index([1, 2, 3]) + res = idx.view("m8[s]") + expected = pd.TimedeltaIndex(idx.values.view("m8[s]")) + tm.assert_index_equal(res, expected) + + res2 = idx.view("m8[D]") + expected2 = idx.values.view("m8[D]") + 
tm.assert_numpy_array_equal(res2, expected2) + + res3 = idx.view("M8[h]") + expected3 = idx.values.view("M8[h]") + tm.assert_numpy_array_equal(res3, expected3) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_any_index.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_any_index.py new file mode 100644 index 0000000000000000000000000000000000000000..10204cfb78e8928dd69e0ea33ce40b02840959ed --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_any_index.py @@ -0,0 +1,172 @@ +""" +Tests that can be parametrized over _any_ Index object. +""" +import re + +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +import pandas._testing as tm + + +def test_boolean_context_compat(index): + # GH#7897 + with pytest.raises(ValueError, match="The truth value of a"): + if index: + pass + + with pytest.raises(ValueError, match="The truth value of a"): + bool(index) + + +def test_sort(index): + msg = "cannot sort an Index object in-place, use sort_values instead" + with pytest.raises(TypeError, match=msg): + index.sort() + + +def test_hash_error(index): + with pytest.raises(TypeError, match=f"unhashable type: '{type(index).__name__}'"): + hash(index) + + +def test_mutability(index): + if not len(index): + pytest.skip("Test doesn't make sense for empty index") + msg = "Index does not support mutable operations" + with pytest.raises(TypeError, match=msg): + index[0] = index[0] + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_map_identity_mapping(index, request): + # GH#12766 + + result = index.map(lambda x: x) + if index.dtype == object and result.dtype == bool: + assert (index == result).all() + # TODO: could work that into the 'exact="equiv"'? + return # FIXME: doesn't belong in this file anymore! 
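+ # for all other dtypes, the identity map must round-trip to an equivalent Index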
+ tm.assert_index_equal(result, index, exact="equiv") + + +def test_wrong_number_names(index): + names = index.nlevels * ["apple", "banana", "carrot"] + with pytest.raises(ValueError, match="^Length"): + index.names = names + + +def test_view_preserves_name(index): + assert index.view().name == index.name + + +def test_ravel(index): + # GH#19956 ravel returning ndarray is deprecated, in 2.0 returns a view on self + res = index.ravel() + tm.assert_index_equal(res, index) + + +class TestConversion: + def test_to_series(self, index): + # assert that we are creating a copy of the index + + ser = index.to_series() + assert ser.values is not index.values + assert ser.index is not index + assert ser.name == index.name + + def test_to_series_with_arguments(self, index): + # GH#18699 + + # index kwarg + ser = index.to_series(index=index) + + assert ser.values is not index.values + assert ser.index is index + assert ser.name == index.name + + # name kwarg + ser = index.to_series(name="__test") + + assert ser.values is not index.values + assert ser.index is not index + assert ser.name != index.name + + def test_tolist_matches_list(self, index): + assert index.tolist() == list(index) + + +class TestRoundTrips: + def test_pickle_roundtrip(self, index): + result = tm.round_trip_pickle(index) + tm.assert_index_equal(result, index, exact=True) + if result.nlevels > 1: + # GH#8367 round-trip with timezone + assert index.equal_levels(result) + + def test_pickle_preserves_name(self, index): + original_name, index.name = index.name, "foo" + unpickled = tm.round_trip_pickle(index) + assert index.equals(unpickled) + index.name = original_name + + +class TestIndexing: + def test_get_loc_listlike_raises_invalid_index_error(self, index): + # and never TypeError + key = np.array([0, 1], dtype=np.intp) + + with pytest.raises(InvalidIndexError, match=r"\[0 1\]"): + index.get_loc(key) + + with pytest.raises(InvalidIndexError, match=r"\[False True\]"): + index.get_loc(key.astype(bool)) + + def test_getitem_ellipsis(self, index): + # GH#21282 + result = index[...] 
+ assert result.equals(index) + assert result is not index + + def test_slice_keeps_name(self, index): + assert index.name == index[1:].name + + @pytest.mark.parametrize("item", [101, "no_int", 2.5]) + def test_getitem_error(self, index, item): + msg = "|".join( + [ + r"index 101 is out of bounds for axis 0 with size [\d]+", + re.escape( + "only integers, slices (`:`), ellipsis (`...`), " + "numpy.newaxis (`None`) and integer or boolean arrays " + "are valid indices" + ), + "index out of bounds", # string[pyarrow] + ] + ) + with pytest.raises(IndexError, match=msg): + index[item] + + +class TestRendering: + def test_str(self, index): + # test the string repr + index.name = "foo" + assert "'foo'" in str(index) + assert type(index).__name__ in str(index) + + +class TestReductions: + def test_argmax_axis_invalid(self, index): + # GH#23081 + msg = r"`axis` must be fewer than the number of dimensions \(1\)" + with pytest.raises(ValueError, match=msg): + index.argmax(axis=1) + with pytest.raises(ValueError, match=msg): + index.argmin(axis=2) + with pytest.raises(ValueError, match=msg): + index.min(axis=-2) + with pytest.raises(ValueError, match=msg): + index.max(axis=-3) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_base.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..b7204d7af1cbbb99503714c8dc5f8660791f5112 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_base.py @@ -0,0 +1,1737 @@ +from collections import defaultdict +from datetime import datetime +from functools import partial +import math +import operator +import re + +import numpy as np +import pytest + +from pandas.compat import IS64 +from pandas.errors import InvalidIndexError +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import ( + is_any_real_numeric_dtype, + is_numeric_dtype, + is_object_dtype, +) + +import pandas as pd +from pandas import ( + CategoricalIndex, + DataFrame, + DatetimeIndex, + IntervalIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.indexes.api import ( + Index, + MultiIndex, + _get_combined_index, + ensure_index, + ensure_index_from_sequences, +) + + +class TestIndex: + @pytest.fixture + def simple_index(self) -> Index: + return Index(list("abcde")) + + def test_can_hold_identifiers(self, simple_index): + index = simple_index + key = index[0] + assert index._can_hold_identifiers_and_holds_name(key) is True + + @pytest.mark.parametrize("index", ["datetime"], indirect=True) + def test_new_axis(self, index): + # TODO: a bunch of scattered tests check this deprecation is enforced. + # de-duplicate/centralize them. 
+ with pytest.raises(ValueError, match="Multi-dimensional indexing"): + # GH#30588 multi-dimensional indexing deprecated + index[None, :] + + def test_constructor_regular(self, index): + tm.assert_contains_all(index, index) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_constructor_casting(self, index): + # casting + arr = np.array(index) + new_index = Index(arr) + tm.assert_contains_all(arr, new_index) + tm.assert_index_equal(index, new_index) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_constructor_copy(self, index, using_infer_string): + arr = np.array(index) + new_index = Index(arr, copy=True, name="name") + assert isinstance(new_index, Index) + assert new_index.name == "name" + if using_infer_string: + tm.assert_extension_array_equal( + new_index.values, pd.array(arr, dtype="string[pyarrow_numpy]") + ) + else: + tm.assert_numpy_array_equal(arr, new_index.values) + arr[0] = "SOMEBIGLONGSTRING" + assert new_index[0] != "SOMEBIGLONGSTRING" + + @pytest.mark.parametrize("cast_as_obj", [True, False]) + @pytest.mark.parametrize( + "index", + [ + date_range( + "2015-01-01 10:00", + freq="D", + periods=3, + tz="US/Eastern", + name="Green Eggs & Ham", + ), # DTI with tz + date_range("2015-01-01 10:00", freq="D", periods=3), # DTI no tz + timedelta_range("1 days", freq="D", periods=3), # td + period_range("2015-01-01", freq="D", periods=3), # period + ], + ) + def test_constructor_from_index_dtlike(self, cast_as_obj, index): + if cast_as_obj: + with tm.assert_produces_warning(FutureWarning, match="Dtype inference"): + result = Index(index.astype(object)) + else: + result = Index(index) + + tm.assert_index_equal(result, index) + + if isinstance(index, DatetimeIndex): + assert result.tz == index.tz + if cast_as_obj: + # GH#23524 check that Index(dti, dtype=object) does not + # incorrectly raise ValueError, and that nanoseconds are not + # dropped + index += pd.Timedelta(nanoseconds=50) + result = Index(index, dtype=object) + assert result.dtype == np.object_ + assert list(result) == list(index) + + @pytest.mark.parametrize( + "index,has_tz", + [ + ( + date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"), + True, + ), # datetimetz + (timedelta_range("1 days", freq="D", periods=3), False), # td + (period_range("2015-01-01", freq="D", periods=3), False), # period + ], + ) + def test_constructor_from_series_dtlike(self, index, has_tz): + result = Index(Series(index)) + tm.assert_index_equal(result, index) + + if has_tz: + assert result.tz == index.tz + + def test_constructor_from_series_freq(self): + # GH 6273 + # create from a series, passing a freq + dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"] + expected = DatetimeIndex(dts, freq="MS") + + s = Series(pd.to_datetime(dts)) + result = DatetimeIndex(s, freq="MS") + + tm.assert_index_equal(result, expected) + + def test_constructor_from_frame_series_freq(self, using_infer_string): + # GH 6273 + # create from a series, passing a freq + dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"] + expected = DatetimeIndex(dts, freq="MS") + + df = DataFrame(np.random.default_rng(2).random((5, 3))) + df["date"] = dts + result = DatetimeIndex(df["date"], freq="MS") + dtype = object if not using_infer_string else "string" + assert df["date"].dtype == dtype + expected.name = "date" + tm.assert_index_equal(result, expected) + + expected = Series(dts, name="date") + tm.assert_series_equal(df["date"], expected) + + # GH 6274 + # infer freq of same + if not 
using_infer_string: + # Doesn't work with arrow strings + freq = pd.infer_freq(df["date"]) + assert freq == "MS" + + def test_constructor_int_dtype_nan(self): + # see gh-15187 + data = [np.nan] + expected = Index(data, dtype=np.float64) + result = Index(data, dtype="float") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "klass,dtype,na_val", + [ + (Index, np.float64, np.nan), + (DatetimeIndex, "datetime64[ns]", pd.NaT), + ], + ) + def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val): + # GH 13467 + na_list = [na_val, na_val] + expected = klass(na_list) + assert expected.dtype == dtype + + result = Index(na_list) + tm.assert_index_equal(result, expected) + + result = Index(np.array(na_list)) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "vals,dtype", + [ + ([1, 2, 3, 4, 5], "int"), + ([1.1, np.nan, 2.2, 3.0], "float"), + (["A", "B", "C", np.nan], "obj"), + ], + ) + def test_constructor_simple_new(self, vals, dtype): + index = Index(vals, name=dtype) + result = index._simple_new(index.values, dtype) + tm.assert_index_equal(result, index) + + @pytest.mark.parametrize("attr", ["values", "asi8"]) + @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) + def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass): + # Test constructing with a datetimetz dtype + # .values produces numpy datetimes, so these are considered naive + # .asi8 produces integers, so these are considered epoch timestamps + # ^the above will be true in a later version. Right now we `.view` + # the i8 values as NS_DTYPE, effectively treating them as wall times. + index = date_range("2011-01-01", periods=5) + arg = getattr(index, attr) + index = index.tz_localize(tz_naive_fixture) + dtype = index.dtype + + # As of 2.0 astype raises on dt64.astype(dt64tz) + err = tz_naive_fixture is not None + msg = "Cannot use .astype to convert from timezone-naive dtype to" + + if attr == "asi8": + result = DatetimeIndex(arg).tz_localize(tz_naive_fixture) + tm.assert_index_equal(result, index) + elif klass is Index: + with pytest.raises(TypeError, match="unexpected keyword"): + klass(arg, tz=tz_naive_fixture) + else: + result = klass(arg, tz=tz_naive_fixture) + tm.assert_index_equal(result, index) + + if attr == "asi8": + if err: + with pytest.raises(TypeError, match=msg): + DatetimeIndex(arg).astype(dtype) + else: + result = DatetimeIndex(arg).astype(dtype) + tm.assert_index_equal(result, index) + else: + result = klass(arg, dtype=dtype) + tm.assert_index_equal(result, index) + + if attr == "asi8": + result = DatetimeIndex(list(arg)).tz_localize(tz_naive_fixture) + tm.assert_index_equal(result, index) + elif klass is Index: + with pytest.raises(TypeError, match="unexpected keyword"): + klass(arg, tz=tz_naive_fixture) + else: + result = klass(list(arg), tz=tz_naive_fixture) + tm.assert_index_equal(result, index) + + if attr == "asi8": + if err: + with pytest.raises(TypeError, match=msg): + DatetimeIndex(list(arg)).astype(dtype) + else: + result = DatetimeIndex(list(arg)).astype(dtype) + tm.assert_index_equal(result, index) + else: + result = klass(list(arg), dtype=dtype) + tm.assert_index_equal(result, index) + + @pytest.mark.parametrize("attr", ["values", "asi8"]) + @pytest.mark.parametrize("klass", [Index, TimedeltaIndex]) + def test_constructor_dtypes_timedelta(self, attr, klass): + index = timedelta_range("1 days", periods=5) + index = index._with_freq(None) # won't be preserved by constructors + dtype = index.dtype + + values = getattr(index, attr) + + 
result = klass(values, dtype=dtype) + tm.assert_index_equal(result, index) + + result = klass(list(values), dtype=dtype) + tm.assert_index_equal(result, index) + + @pytest.mark.parametrize("value", [[], iter([]), (_ for _ in [])]) + @pytest.mark.parametrize( + "klass", + [ + Index, + CategoricalIndex, + DatetimeIndex, + TimedeltaIndex, + ], + ) + def test_constructor_empty(self, value, klass): + empty = klass(value) + assert isinstance(empty, klass) + assert not len(empty) + + @pytest.mark.parametrize( + "empty,klass", + [ + (PeriodIndex([], freq="D"), PeriodIndex), + (PeriodIndex(iter([]), freq="D"), PeriodIndex), + (PeriodIndex((_ for _ in []), freq="D"), PeriodIndex), + (RangeIndex(step=1), RangeIndex), + (MultiIndex(levels=[[1, 2], ["blue", "red"]], codes=[[], []]), MultiIndex), + ], + ) + def test_constructor_empty_special(self, empty, klass): + assert isinstance(empty, klass) + assert not len(empty) + + @pytest.mark.parametrize( + "index", + [ + "datetime", + "float64", + "float32", + "int64", + "int32", + "period", + "range", + "repeats", + "timedelta", + "tuples", + "uint64", + "uint32", + ], + indirect=True, + ) + def test_view_with_args(self, index): + index.view("i8") + + @pytest.mark.parametrize( + "index", + [ + "string", + pytest.param("categorical", marks=pytest.mark.xfail(reason="gh-25464")), + "bool-object", + "bool-dtype", + "empty", + ], + indirect=True, + ) + def test_view_with_args_object_array_raises(self, index): + if index.dtype == bool: + msg = "When changing to a larger dtype" + with pytest.raises(ValueError, match=msg): + index.view("i8") + elif index.dtype == "string": + with pytest.raises(NotImplementedError, match="i8"): + index.view("i8") + else: + msg = ( + "Cannot change data-type for array of references|" + "Cannot change data-type for object array|" + ) + with pytest.raises(TypeError, match=msg): + index.view("i8") + + @pytest.mark.parametrize( + "index", + ["int64", "int32", "range"], + indirect=True, + ) + def test_astype(self, index): + casted = index.astype("i8") + + # it works! 
+ casted.get_loc(5) + + # pass on name + index.name = "foobar" + casted = index.astype("i8") + assert casted.name == "foobar" + + def test_equals_object(self): + # same + assert Index(["a", "b", "c"]).equals(Index(["a", "b", "c"])) + + @pytest.mark.parametrize( + "comp", [Index(["a", "b"]), Index(["a", "b", "d"]), ["a", "b", "c"]] + ) + def test_not_equals_object(self, comp): + assert not Index(["a", "b", "c"]).equals(comp) + + def test_identical(self): + # index + i1 = Index(["a", "b", "c"]) + i2 = Index(["a", "b", "c"]) + + assert i1.identical(i2) + + i1 = i1.rename("foo") + assert i1.equals(i2) + assert not i1.identical(i2) + + i2 = i2.rename("foo") + assert i1.identical(i2) + + i3 = Index([("a", "a"), ("a", "b"), ("b", "a")]) + i4 = Index([("a", "a"), ("a", "b"), ("b", "a")], tupleize_cols=False) + assert not i3.identical(i4) + + def test_is_(self): + ind = Index(range(10)) + assert ind.is_(ind) + assert ind.is_(ind.view().view().view().view()) + assert not ind.is_(Index(range(10))) + assert not ind.is_(ind.copy()) + assert not ind.is_(ind.copy(deep=False)) + assert not ind.is_(ind[:]) + assert not ind.is_(np.array(range(10))) + + # quasi-implementation dependent + assert ind.is_(ind.view()) + ind2 = ind.view() + ind2.name = "bob" + assert ind.is_(ind2) + assert ind2.is_(ind) + # doesn't matter if Indices are *actually* views of underlying data, + assert not ind.is_(Index(ind.values)) + arr = np.array(range(1, 11)) + ind1 = Index(arr, copy=False) + ind2 = Index(arr, copy=False) + assert not ind1.is_(ind2) + + def test_asof_numeric_vs_bool_raises(self): + left = Index([1, 2, 3]) + right = Index([True, False], dtype=object) + + msg = "Cannot compare dtypes int64 and bool" + with pytest.raises(TypeError, match=msg): + left.asof(right[0]) + # TODO: should right.asof(left[0]) also raise? 
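+ # list-like keys are rejected up front with InvalidIndexError, never TypeError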
+ + with pytest.raises(InvalidIndexError, match=re.escape(str(right))): + left.asof(right) + + with pytest.raises(InvalidIndexError, match=re.escape(str(left))): + right.asof(left) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_booleanindex(self, index): + bool_index = np.ones(len(index), dtype=bool) + bool_index[5:30:2] = False + + sub_index = index[bool_index] + + for i, val in enumerate(sub_index): + assert sub_index.get_loc(val) == i + + sub_index = index[list(bool_index)] + for i, val in enumerate(sub_index): + assert sub_index.get_loc(val) == i + + def test_fancy(self, simple_index): + index = simple_index + sl = index[[1, 2, 3]] + for i in sl: + assert i == sl[sl.get_loc(i)] + + @pytest.mark.parametrize( + "index", + ["string", "int64", "int32", "uint64", "uint32", "float64", "float32"], + indirect=True, + ) + @pytest.mark.parametrize("dtype", [int, np.bool_]) + def test_empty_fancy(self, index, dtype, request, using_infer_string): + if dtype is np.bool_ and using_infer_string and index.dtype == "string": + request.applymarker(pytest.mark.xfail(reason="numpy behavior is buggy")) + empty_arr = np.array([], dtype=dtype) + empty_index = type(index)([], dtype=index.dtype) + + assert index[[]].identical(empty_index) + if dtype == np.bool_: + with tm.assert_produces_warning(FutureWarning, match="is deprecated"): + assert index[empty_arr].identical(empty_index) + else: + assert index[empty_arr].identical(empty_index) + + @pytest.mark.parametrize( + "index", + ["string", "int64", "int32", "uint64", "uint32", "float64", "float32"], + indirect=True, + ) + def test_empty_fancy_raises(self, index): + # DatetimeIndex is excluded, because it overrides getitem and should + # be tested separately. + empty_farr = np.array([], dtype=np.float64) + empty_index = type(index)([], dtype=index.dtype) + + assert index[[]].identical(empty_index) + # np.ndarray only accepts ndarray of int & bool dtypes, so should Index + msg = r"arrays used as indices must be of integer" + with pytest.raises(IndexError, match=msg): + index[empty_farr] + + def test_union_dt_as_obj(self, simple_index): + # TODO: Replace with a fixture + index = simple_index + date_index = date_range("2019-01-01", periods=10) + first_cat = index.union(date_index) + second_cat = index.union(index) + + appended = Index(np.append(index, date_index.astype("O"))) + + tm.assert_index_equal(first_cat, appended) + tm.assert_index_equal(second_cat, index) + tm.assert_contains_all(index, first_cat) + tm.assert_contains_all(index, second_cat) + tm.assert_contains_all(date_index, first_cat) + + def test_map_with_tuples(self): + # GH 12766 + + # Test that returning a single tuple from an Index + # returns an Index. + index = Index(np.arange(3), dtype=np.int64) + result = index.map(lambda x: (x,)) + expected = Index([(i,) for i in index]) + tm.assert_index_equal(result, expected) + + # Test that returning a tuple from a map of a single index + # returns a MultiIndex object. + result = index.map(lambda x: (x, x == 1)) + expected = MultiIndex.from_tuples([(i, i == 1) for i in index]) + tm.assert_index_equal(result, expected) + + def test_map_with_tuples_mi(self): + # Test that returning a single object from a MultiIndex + # returns an Index.
+ first_level = ["foo", "bar", "baz"] + multi_index = MultiIndex.from_tuples(zip(first_level, [1, 2, 3])) + reduced_index = multi_index.map(lambda x: x[0]) + tm.assert_index_equal(reduced_index, Index(first_level)) + + @pytest.mark.parametrize( + "index", + [ + date_range("2020-01-01", freq="D", periods=10), + period_range("2020-01-01", freq="D", periods=10), + timedelta_range("1 day", periods=10), + ], + ) + def test_map_tseries_indices_return_index(self, index): + expected = Index([1] * 10) + result = index.map(lambda x: 1) + tm.assert_index_equal(expected, result) + + def test_map_tseries_indices_accsr_return_index(self): + date_index = DatetimeIndex( + date_range("2020-01-01", periods=24, freq="h"), name="hourly" + ) + result = date_index.map(lambda x: x.hour) + expected = Index(np.arange(24, dtype="int64"), name="hourly") + tm.assert_index_equal(result, expected, exact=True) + + @pytest.mark.parametrize( + "mapper", + [ + lambda values, index: {i: e for e, i in zip(values, index)}, + lambda values, index: Series(values, index), + ], + ) + def test_map_dictlike_simple(self, mapper): + # GH 12756 + expected = Index(["foo", "bar", "baz"]) + index = Index(np.arange(3), dtype=np.int64) + result = index.map(mapper(expected.values, index)) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "mapper", + [ + lambda values, index: {i: e for e, i in zip(values, index)}, + lambda values, index: Series(values, index), + ], + ) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_map_dictlike(self, index, mapper, request): + # GH 12756 + if isinstance(index, CategoricalIndex): + pytest.skip("Tested in test_categorical") + elif not index.is_unique: + pytest.skip("Cannot map duplicated index") + + rng = np.arange(len(index), 0, -1, dtype=np.int64) + + if index.empty: + # to match proper result coercion for uints + expected = Index([]) + elif is_numeric_dtype(index.dtype): + expected = index._constructor(rng, dtype=index.dtype) + elif type(index) is Index and index.dtype != object: + # i.e. 
EA-backed, for now just Nullable + expected = Index(rng, dtype=index.dtype) + else: + expected = Index(rng) + + result = index.map(mapper(expected, index)) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "mapper", + [Series(["foo", 2.0, "baz"], index=[0, 2, -1]), {0: "foo", 2: 2.0, -1: "baz"}], + ) + def test_map_with_non_function_missing_values(self, mapper): + # GH 12756 + expected = Index([2.0, np.nan, "foo"]) + result = Index([2, 1, 0]).map(mapper) + + tm.assert_index_equal(expected, result) + + def test_map_na_exclusion(self): + index = Index([1.5, np.nan, 3, np.nan, 5]) + + result = index.map(lambda x: x * 2, na_action="ignore") + expected = index * 2 + tm.assert_index_equal(result, expected) + + def test_map_defaultdict(self): + index = Index([1, 2, 3]) + default_dict = defaultdict(lambda: "blank") + default_dict[1] = "stuff" + result = index.map(default_dict) + expected = Index(["stuff", "blank", "blank"]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("name,expected", [("foo", "foo"), ("bar", None)]) + def test_append_empty_preserve_name(self, name, expected): + left = Index([], name="foo") + right = Index([1, 2, 3], name=name) + + msg = "The behavior of array concatenation with empty entries is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = left.append(right) + assert result.name == expected + + @pytest.mark.parametrize( + "index, expected", + [ + ("string", False), + ("bool-object", False), + ("bool-dtype", False), + ("categorical", False), + ("int64", True), + ("int32", True), + ("uint64", True), + ("uint32", True), + ("datetime", False), + ("float64", True), + ("float32", True), + ], + indirect=["index"], + ) + def test_is_numeric(self, index, expected): + assert is_any_real_numeric_dtype(index) is expected + + @pytest.mark.parametrize( + "index, expected", + [ + ("string", True), + ("bool-object", True), + ("bool-dtype", False), + ("categorical", False), + ("int64", False), + ("int32", False), + ("uint64", False), + ("uint32", False), + ("datetime", False), + ("float64", False), + ("float32", False), + ], + indirect=["index"], + ) + def test_is_object(self, index, expected, using_infer_string): + if using_infer_string and index.dtype == "string" and expected: + expected = False + assert is_object_dtype(index) is expected + + def test_summary(self, index): + index._summary() + + def test_format_bug(self): + # GH 14626 + # Windows has different precision on datetime.datetime.now (it doesn't + # include microseconds); the default repr for Timestamp shows these but + # Index formatting does not, so we skip in that case + now = datetime.now() + msg = r"Index\.format is deprecated" + + if not str(now).endswith("000"): + index = Index([now]) + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = index.format() + expected = [str(index[0])] + assert formatted == expected + + with tm.assert_produces_warning(FutureWarning, match=msg): + Index([]).format() + + @pytest.mark.parametrize("vals", [[1, 2.0 + 3.0j, 4.0], ["a", "b", "c"]]) + def test_format_missing(self, vals, nulls_fixture): + # 2845 + vals = list(vals) # Copy for each iteration + vals.append(nulls_fixture) + index = Index(vals, dtype=object) + # TODO: case with complex dtype?
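+ # Index.format is deprecated; the rendered values below are checked under the FutureWarning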
+ + msg = r"Index\.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + formatted = index.format() + null_repr = "NaN" if isinstance(nulls_fixture, float) else str(nulls_fixture) + expected = [str(index[0]), str(index[1]), str(index[2]), null_repr] + + assert formatted == expected + assert index[3] is nulls_fixture + + @pytest.mark.parametrize("op", ["any", "all"]) + def test_logical_compat(self, op, simple_index): + index = simple_index + left = getattr(index, op)() + assert left == getattr(index.values, op)() + right = getattr(index.to_series(), op)() + # left might not match right exactly in e.g. string cases where the + # because we use np.any/all instead of .any/all + assert bool(left) == bool(right) + + @pytest.mark.parametrize( + "index", ["string", "int64", "int32", "float64", "float32"], indirect=True + ) + def test_drop_by_str_label(self, index): + n = len(index) + drop = index[list(range(5, 10))] + dropped = index.drop(drop) + + expected = index[list(range(5)) + list(range(10, n))] + tm.assert_index_equal(dropped, expected) + + dropped = index.drop(index[0]) + expected = index[1:] + tm.assert_index_equal(dropped, expected) + + @pytest.mark.parametrize( + "index", ["string", "int64", "int32", "float64", "float32"], indirect=True + ) + @pytest.mark.parametrize("keys", [["foo", "bar"], ["1", "bar"]]) + def test_drop_by_str_label_raises_missing_keys(self, index, keys): + with pytest.raises(KeyError, match=""): + index.drop(keys) + + @pytest.mark.parametrize( + "index", ["string", "int64", "int32", "float64", "float32"], indirect=True + ) + def test_drop_by_str_label_errors_ignore(self, index): + n = len(index) + drop = index[list(range(5, 10))] + mixed = drop.tolist() + ["foo"] + dropped = index.drop(mixed, errors="ignore") + + expected = index[list(range(5)) + list(range(10, n))] + tm.assert_index_equal(dropped, expected) + + dropped = index.drop(["foo", "bar"], errors="ignore") + expected = index[list(range(n))] + tm.assert_index_equal(dropped, expected) + + def test_drop_by_numeric_label_loc(self): + # TODO: Parametrize numeric and str tests after self.strIndex fixture + index = Index([1, 2, 3]) + dropped = index.drop(1) + expected = Index([2, 3]) + + tm.assert_index_equal(dropped, expected) + + def test_drop_by_numeric_label_raises_missing_keys(self): + index = Index([1, 2, 3]) + with pytest.raises(KeyError, match=""): + index.drop([3, 4]) + + @pytest.mark.parametrize( + "key,expected", [(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))] + ) + def test_drop_by_numeric_label_errors_ignore(self, key, expected): + index = Index([1, 2, 3]) + dropped = index.drop(key, errors="ignore") + + tm.assert_index_equal(dropped, expected) + + @pytest.mark.parametrize( + "values", + [["a", "b", ("c", "d")], ["a", ("c", "d"), "b"], [("c", "d"), "a", "b"]], + ) + @pytest.mark.parametrize("to_drop", [[("c", "d"), "a"], ["a", ("c", "d")]]) + def test_drop_tuple(self, values, to_drop): + # GH 18304 + index = Index(values) + expected = Index(["b"], dtype=object) + + result = index.drop(to_drop) + tm.assert_index_equal(result, expected) + + removed = index.drop(to_drop[0]) + for drop_me in to_drop[1], [to_drop[1]]: + result = removed.drop(drop_me) + tm.assert_index_equal(result, expected) + + removed = index.drop(to_drop[1]) + msg = rf"\"\[{re.escape(to_drop[1].__repr__())}\] not found in axis\"" + for drop_me in to_drop[1], [to_drop[1]]: + with pytest.raises(KeyError, match=msg): + removed.drop(drop_me) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is 
deprecated:FutureWarning") + def test_drop_with_duplicates_in_index(self, index): + # GH38051 + if len(index) == 0 or isinstance(index, MultiIndex): + pytest.skip("Test doesn't make sense for empty MultiIndex") + if isinstance(index, IntervalIndex) and not IS64: + pytest.skip("Cannot test IntervalIndex with int64 dtype on 32 bit platform") + index = index.unique().repeat(2) + expected = index[2:] + result = index.drop(index[0]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "attr", + [ + "is_monotonic_increasing", + "is_monotonic_decreasing", + "_is_strictly_monotonic_increasing", + "_is_strictly_monotonic_decreasing", + ], + ) + def test_is_monotonic_incomparable(self, attr): + index = Index([5, datetime.now(), 7]) + assert not getattr(index, attr) + + @pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}]) + @pytest.mark.parametrize( + "index,expected", + [ + (Index(["qux", "baz", "foo", "bar"]), np.array([False, False, True, True])), + (Index([]), np.array([], dtype=bool)), # empty + ], + ) + def test_isin(self, values, index, expected): + result = index.isin(values) + tm.assert_numpy_array_equal(result, expected) + + def test_isin_nan_common_object( + self, nulls_fixture, nulls_fixture2, using_infer_string + ): + # Test cartesian product of null fixtures and ensure that we don't + # mangle the various types (save a corner case with PyPy) + idx = Index(["a", nulls_fixture]) + + # all nans are the same + if ( + isinstance(nulls_fixture, float) + and isinstance(nulls_fixture2, float) + and math.isnan(nulls_fixture) + and math.isnan(nulls_fixture2) + ): + tm.assert_numpy_array_equal( + idx.isin([nulls_fixture2]), + np.array([False, True]), + ) + + elif nulls_fixture is nulls_fixture2: # should preserve NA type + tm.assert_numpy_array_equal( + idx.isin([nulls_fixture2]), + np.array([False, True]), + ) + + elif using_infer_string and idx.dtype == "string": + tm.assert_numpy_array_equal( + idx.isin([nulls_fixture2]), + np.array([False, True]), + ) + + else: + tm.assert_numpy_array_equal( + idx.isin([nulls_fixture2]), + np.array([False, False]), + ) + + def test_isin_nan_common_float64(self, nulls_fixture, float_numpy_dtype): + dtype = float_numpy_dtype + + if nulls_fixture is pd.NaT or nulls_fixture is pd.NA: + # Check 1) that we cannot construct a float64 Index with this value + # and 2) that with an NaN we do not have .isin(nulls_fixture) + msg = ( + r"float\(\) argument must be a string or a (real )?number, " + f"not {repr(type(nulls_fixture).__name__)}" + ) + with pytest.raises(TypeError, match=msg): + Index([1.0, nulls_fixture], dtype=dtype) + + idx = Index([1.0, np.nan], dtype=dtype) + assert not idx.isin([nulls_fixture]).any() + return + + idx = Index([1.0, nulls_fixture], dtype=dtype) + res = idx.isin([np.nan]) + tm.assert_numpy_array_equal(res, np.array([False, True])) + + # we cannot compare NaT with NaN + res = idx.isin([pd.NaT]) + tm.assert_numpy_array_equal(res, np.array([False, False])) + + @pytest.mark.parametrize("level", [0, -1]) + @pytest.mark.parametrize( + "index", + [ + Index(["qux", "baz", "foo", "bar"]), + Index([1.0, 2.0, 3.0, 4.0], dtype=np.float64), + ], + ) + def test_isin_level_kwarg(self, level, index): + values = index.tolist()[-2:] + ["nonexisting"] + + expected = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(expected, index.isin(values, level=level)) + + index.name = "foobar" + tm.assert_numpy_array_equal(expected, index.isin(values, level="foobar")) + + def 
test_isin_level_kwarg_bad_level_raises(self, index): + for level in [10, index.nlevels, -(index.nlevels + 1)]: + with pytest.raises(IndexError, match="Too many levels"): + index.isin([], level=level) + + @pytest.mark.parametrize("label", [1.0, "foobar", "xyzzy", np.nan]) + def test_isin_level_kwarg_bad_label_raises(self, label, index): + if isinstance(index, MultiIndex): + index = index.rename(["foo", "bar"] + index.names[2:]) + msg = f"'Level {label} not found'" + else: + index = index.rename("foo") + msg = rf"Requested level \({label}\) does not match index name \(foo\)" + with pytest.raises(KeyError, match=msg): + index.isin([], level=label) + + @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])]) + def test_isin_empty(self, empty): + # see gh-16991 + index = Index(["a", "b"]) + expected = np.array([False, False]) + + result = index.isin(empty) + tm.assert_numpy_array_equal(expected, result) + + @td.skip_if_no("pyarrow") + def test_isin_arrow_string_null(self): + # GH#55821 + index = Index(["a", "b"], dtype="string[pyarrow_numpy]") + result = index.isin([None]) + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "values", + [ + [1, 2, 3, 4], + [1.0, 2.0, 3.0, 4.0], + [True, True, True, True], + ["foo", "bar", "baz", "qux"], + date_range("2018-01-01", freq="D", periods=4), + ], + ) + def test_boolean_cmp(self, values): + index = Index(values) + result = index == values + expected = np.array([True, True, True, True], dtype=bool) + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + @pytest.mark.parametrize("name,level", [(None, 0), ("a", "a")]) + def test_get_level_values(self, index, name, level): + expected = index.copy() + if name: + expected.name = name + + result = expected.get_level_values(level) + tm.assert_index_equal(result, expected) + + def test_slice_keep_name(self): + index = Index(["a", "b"], name="asdf") + assert index.name == index[1:].name + + @pytest.mark.parametrize( + "index", + [ + "string", + "datetime", + "int64", + "int32", + "uint64", + "uint32", + "float64", + "float32", + ], + indirect=True, + ) + def test_join_self(self, index, join_type): + result = index.join(index, how=join_type) + expected = index + if join_type == "outer": + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("method", ["strip", "rstrip", "lstrip"]) + def test_str_attribute(self, method): + # GH9068 + index = Index([" jack", "jill ", " jesse ", "frank"]) + expected = Index([getattr(str, method)(x) for x in index.values]) + + result = getattr(index.str, method)() + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "index", + [ + Index(range(5)), + date_range("2020-01-01", periods=10), + MultiIndex.from_tuples([("foo", "1"), ("bar", "3")]), + period_range(start="2000", end="2010", freq="Y"), + ], + ) + def test_str_attribute_raises(self, index): + with pytest.raises(AttributeError, match="only use .str accessor"): + index.str.repeat(2) + + @pytest.mark.parametrize( + "expand,expected", + [ + (None, Index([["a", "b", "c"], ["d", "e"], ["f"]])), + (False, Index([["a", "b", "c"], ["d", "e"], ["f"]])), + ( + True, + MultiIndex.from_tuples( + [("a", "b", "c"), ("d", "e", np.nan), ("f", np.nan, np.nan)] + ), + ), + ], + ) + def test_str_split(self, expand, expected): + index = Index(["a b c", "d e", "f"]) + if expand is not None: + result = index.str.split(expand=expand) + 
else: + result = index.str.split() + + tm.assert_index_equal(result, expected) + + def test_str_bool_return(self): + # test boolean case, should return np.array instead of boolean Index + index = Index(["a1", "a2", "b1", "b2"]) + result = index.str.startswith("a") + expected = np.array([True, True, False, False]) + + tm.assert_numpy_array_equal(result, expected) + assert isinstance(result, np.ndarray) + + def test_str_bool_series_indexing(self): + index = Index(["a1", "a2", "b1", "b2"]) + s = Series(range(4), index=index) + + result = s[s.index.str.startswith("a")] + expected = Series(range(2), index=["a1", "a2"]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "index,expected", [(Index(list("abcd")), True), (Index(range(4)), False)] + ) + def test_tab_completion(self, index, expected): + # GH 9910 + result = "str" in dir(index) + assert result == expected + + def test_indexing_doesnt_change_class(self): + index = Index([1, 2, 3, "a", "b", "c"]) + + assert index[1:3].identical(Index([2, 3], dtype=np.object_)) + assert index[[0, 1]].identical(Index([1, 2], dtype=np.object_)) + + def test_outer_join_sort(self): + left_index = Index(np.random.default_rng(2).permutation(15)) + right_index = date_range("2020-01-01", periods=10) + + with tm.assert_produces_warning(RuntimeWarning): + result = left_index.join(right_index, how="outer") + + with tm.assert_produces_warning(RuntimeWarning): + expected = left_index.astype(object).union(right_index.astype(object)) + + tm.assert_index_equal(result, expected) + + def test_take_fill_value(self): + # GH 12631 + index = Index(list("ABC"), name="xxx") + result = index.take(np.array([1, 0, -1])) + expected = Index(list("BAC"), name="xxx") + tm.assert_index_equal(result, expected) + + # fill_value + result = index.take(np.array([1, 0, -1]), fill_value=True) + expected = Index(["B", "A", np.nan], name="xxx") + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = index.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = Index(["B", "A", "C"], name="xxx") + tm.assert_index_equal(result, expected) + + def test_take_fill_value_none_raises(self): + index = Index(list("ABC"), name="xxx") + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + + with pytest.raises(ValueError, match=msg): + index.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + index.take(np.array([1, 0, -5]), fill_value=True) + + def test_take_bad_bounds_raises(self): + index = Index(list("ABC"), name="xxx") + with pytest.raises(IndexError, match="out of bounds"): + index.take(np.array([1, -5])) + + @pytest.mark.parametrize("name", [None, "foobar"]) + @pytest.mark.parametrize( + "labels", + [ + [], + np.array([]), + ["A", "B", "C"], + ["C", "B", "A"], + np.array(["A", "B", "C"]), + np.array(["C", "B", "A"]), + # Must preserve name even if dtype changes + date_range("20130101", periods=3).values, + date_range("20130101", periods=3).tolist(), + ], + ) + def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name, labels): + # GH6552 + index = Index([0, 1, 2]) + index.name = name + assert index.reindex(labels)[0].name == name + + @pytest.mark.parametrize("labels", [[], np.array([]), np.array([], dtype=np.int64)]) + def test_reindex_preserves_type_if_target_is_empty_list_or_array(self, labels): + # GH7774 + index = Index(list("abc")) + assert index.reindex(labels)[0].dtype.type == index.dtype.type + + @pytest.mark.parametrize( + 
"labels,dtype", + [ + (DatetimeIndex([]), np.datetime64), + ], + ) + def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self, labels, dtype): + # GH7774 + index = Index(list("abc")) + assert index.reindex(labels)[0].dtype.type == dtype + + def test_reindex_doesnt_preserve_type_if_target_is_empty_index_numeric( + self, any_real_numpy_dtype + ): + # GH7774 + dtype = any_real_numpy_dtype + index = Index(list("abc")) + labels = Index([], dtype=dtype) + assert index.reindex(labels)[0].dtype == dtype + + def test_reindex_no_type_preserve_target_empty_mi(self): + index = Index(list("abc")) + result = index.reindex( + MultiIndex([Index([], np.int64), Index([], np.float64)], [[], []]) + )[0] + assert result.levels[0].dtype.type == np.int64 + assert result.levels[1].dtype.type == np.float64 + + def test_reindex_ignoring_level(self): + # GH#35132 + idx = Index([1, 2, 3], name="x") + idx2 = Index([1, 2, 3, 4], name="x") + expected = Index([1, 2, 3, 4], name="x") + result, _ = idx.reindex(idx2, level="x") + tm.assert_index_equal(result, expected) + + def test_groupby(self): + index = Index(range(5)) + result = index.groupby(np.array([1, 1, 2, 2, 2])) + expected = {1: Index([0, 1]), 2: Index([2, 3, 4])} + + tm.assert_dict_equal(result, expected) + + @pytest.mark.parametrize( + "mi,expected", + [ + (MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])), + (MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False])), + ], + ) + def test_equals_op_multiindex(self, mi, expected): + # GH9785 + # test comparisons of multiindex + df = DataFrame( + [3, 6], + columns=["c"], + index=MultiIndex.from_arrays([[1, 4], [2, 5]], names=["a", "b"]), + ) + + result = df.index == mi + tm.assert_numpy_array_equal(result, expected) + + def test_equals_op_multiindex_identify(self): + df = DataFrame( + [3, 6], + columns=["c"], + index=MultiIndex.from_arrays([[1, 4], [2, 5]], names=["a", "b"]), + ) + + result = df.index == df.index + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "index", + [ + MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]), + Index(["foo", "bar", "baz"]), + ], + ) + def test_equals_op_mismatched_multiindex_raises(self, index): + df = DataFrame( + [3, 6], + columns=["c"], + index=MultiIndex.from_arrays([[1, 4], [2, 5]], names=["a", "b"]), + ) + + with pytest.raises(ValueError, match="Lengths must match"): + df.index == index + + def test_equals_op_index_vs_mi_same_length(self, using_infer_string): + mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]) + index = Index(["foo", "bar", "baz"]) + + result = mi == index + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "dt_conv, arg", + [ + (pd.to_datetime, ["2000-01-01", "2000-01-02"]), + (pd.to_timedelta, ["01:02:03", "01:02:04"]), + ], + ) + def test_dt_conversion_preserves_name(self, dt_conv, arg): + # GH 10875 + index = Index(arg, name="label") + assert index.name == dt_conv(index).name + + def test_cached_properties_not_settable(self): + index = Index([1, 2, 3]) + with pytest.raises(AttributeError, match="Can't set attribute"): + index.is_unique = False + + def test_tab_complete_warning(self, ip): + # https://github.com/pandas-dev/pandas/issues/16409 + pytest.importorskip("IPython", minversion="6.0.0") + from IPython.core.completer import provisionalcompleter + + code = "import pandas as pd; idx = pd.Index([1, 2])" + ip.run_cell(code) + + # GH 31324 newer jedi version raises Deprecation 
warning; + # appears resolved 2021-02-02 + with tm.assert_produces_warning(None, raise_on_extra_warnings=False): + with provisionalcompleter("ignore"): + list(ip.Completer.completions("idx.", 4)) + + def test_contains_method_removed(self, index): + # GH#30103 method removed for all types except IntervalIndex + if isinstance(index, IntervalIndex): + index.contains(1) + else: + msg = f"'{type(index).__name__}' object has no attribute 'contains'" + with pytest.raises(AttributeError, match=msg): + index.contains(1) + + def test_sortlevel(self): + index = Index([5, 4, 3, 2, 1]) + with pytest.raises(Exception, match="ascending must be a single bool value or"): + index.sortlevel(ascending="True") + + with pytest.raises( + Exception, match="ascending must be a list of bool values of length 1" + ): + index.sortlevel(ascending=[True, True]) + + with pytest.raises(Exception, match="ascending must be a bool value"): + index.sortlevel(ascending=["True"]) + + expected = Index([1, 2, 3, 4, 5]) + result = index.sortlevel(ascending=[True]) + tm.assert_index_equal(result[0], expected) + + expected = Index([1, 2, 3, 4, 5]) + result = index.sortlevel(ascending=True) + tm.assert_index_equal(result[0], expected) + + expected = Index([5, 4, 3, 2, 1]) + result = index.sortlevel(ascending=False) + tm.assert_index_equal(result[0], expected) + + def test_sortlevel_na_position(self): + # GH#51612 + idx = Index([1, np.nan]) + result = idx.sortlevel(na_position="first")[0] + expected = Index([np.nan, 1]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "periods, expected_results", + [ + (1, [np.nan, 10, 10, 10, 10]), + (2, [np.nan, np.nan, 20, 20, 20]), + (3, [np.nan, np.nan, np.nan, 30, 30]), + ], + ) + def test_index_diff(self, periods, expected_results): + # GH#19708 + idx = Index([10, 20, 30, 40, 50]) + result = idx.diff(periods) + expected = Index(expected_results) + + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "decimals, expected_results", + [ + (0, [1.0, 2.0, 3.0]), + (1, [1.2, 2.3, 3.5]), + (2, [1.23, 2.35, 3.46]), + ], + ) + def test_index_round(self, decimals, expected_results): + # GH#19708 + idx = Index([1.234, 2.345, 3.456]) + result = idx.round(decimals) + expected = Index(expected_results) + + tm.assert_index_equal(result, expected) + + +class TestMixedIntIndex: + # Mostly the tests from common.py for which the results differ + # in py2 and py3 because ints and strings are uncomparable in py3 + # (GH 13514) + @pytest.fixture + def simple_index(self) -> Index: + return Index([0, "a", 1, "b", 2, "c"]) + + def test_argsort(self, simple_index): + index = simple_index + with pytest.raises(TypeError, match="'>|<' not supported"): + index.argsort() + + def test_numpy_argsort(self, simple_index): + index = simple_index + with pytest.raises(TypeError, match="'>|<' not supported"): + np.argsort(index) + + def test_copy_name(self, simple_index): + # Check that "name" argument passed at initialization is honoured + # GH12309 + index = simple_index + + first = type(index)(index, copy=True, name="mario") + second = type(first)(first, copy=False) + + # Even though "copy=False", we want a new object. 
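+        # A minimal sketch of the expected semantics (hypothetical names,
+        # assuming default Index construction behavior):
+        #   base = Index([0, "a"], name="mario")
+        #   alias = Index(base, copy=False)
+        #   base is alias  -> False, yet base.equals(alias) -> True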
+ assert first is not second + tm.assert_index_equal(first, second) + + assert first.name == "mario" + assert second.name == "mario" + + s1 = Series(2, index=first) + s2 = Series(3, index=second[:-1]) + + s3 = s1 * s2 + + assert s3.index.name == "mario" + + def test_copy_name2(self): + # Check that adding a "name" parameter to the copy is honored + # GH14302 + index = Index([1, 2], name="MyName") + index1 = index.copy() + + tm.assert_index_equal(index, index1) + + index2 = index.copy(name="NewName") + tm.assert_index_equal(index, index2, check_names=False) + assert index.name == "MyName" + assert index2.name == "NewName" + + def test_unique_na(self): + idx = Index([2, np.nan, 2, 1], name="my_index") + expected = Index([2, np.nan, 1], name="my_index") + result = idx.unique() + tm.assert_index_equal(result, expected) + + def test_logical_compat(self, simple_index): + index = simple_index + assert index.all() == index.values.all() + assert index.any() == index.values.any() + + @pytest.mark.parametrize("how", ["any", "all"]) + @pytest.mark.parametrize("dtype", [None, object, "category"]) + @pytest.mark.parametrize( + "vals,expected", + [ + ([1, 2, 3], [1, 2, 3]), + ([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]), + ([1.0, 2.0, np.nan, 3.0], [1.0, 2.0, 3.0]), + (["A", "B", "C"], ["A", "B", "C"]), + (["A", np.nan, "B", "C"], ["A", "B", "C"]), + ], + ) + def test_dropna(self, how, dtype, vals, expected): + # GH 6194 + index = Index(vals, dtype=dtype) + result = index.dropna(how=how) + expected = Index(expected, dtype=dtype) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("how", ["any", "all"]) + @pytest.mark.parametrize( + "index,expected", + [ + ( + DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), + DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), + ), + ( + DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", pd.NaT]), + DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]), + ), + ( + TimedeltaIndex(["1 days", "2 days", "3 days"]), + TimedeltaIndex(["1 days", "2 days", "3 days"]), + ), + ( + TimedeltaIndex([pd.NaT, "1 days", "2 days", "3 days", pd.NaT]), + TimedeltaIndex(["1 days", "2 days", "3 days"]), + ), + ( + PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"), + PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"), + ), + ( + PeriodIndex(["2012-02", "2012-04", "NaT", "2012-05"], freq="M"), + PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"), + ), + ], + ) + def test_dropna_dt_like(self, how, index, expected): + result = index.dropna(how=how) + tm.assert_index_equal(result, expected) + + def test_dropna_invalid_how_raises(self): + msg = "invalid how option: xxx" + with pytest.raises(ValueError, match=msg): + Index([1, 2, 3]).dropna(how="xxx") + + @pytest.mark.parametrize( + "index", + [ + Index([np.nan]), + Index([np.nan, 1]), + Index([1, 2, np.nan]), + Index(["a", "b", np.nan]), + pd.to_datetime(["NaT"]), + pd.to_datetime(["NaT", "2000-01-01"]), + pd.to_datetime(["2000-01-01", "NaT", "2000-01-02"]), + pd.to_timedelta(["1 day", "NaT"]), + ], + ) + def test_is_monotonic_na(self, index): + assert index.is_monotonic_increasing is False + assert index.is_monotonic_decreasing is False + assert index._is_strictly_monotonic_increasing is False + assert index._is_strictly_monotonic_decreasing is False + + @pytest.mark.parametrize("dtype", ["f8", "m8[ns]", "M8[us]"]) + @pytest.mark.parametrize("unique_first", [True, False]) + def test_is_monotonic_unique_na(self, dtype, unique_first): + # GH 55755 + index = Index([None, 1, 1], dtype=dtype) + if 
unique_first: + assert index.is_unique is False + assert index.is_monotonic_increasing is False + assert index.is_monotonic_decreasing is False + else: + assert index.is_monotonic_increasing is False + assert index.is_monotonic_decreasing is False + assert index.is_unique is False + + def test_int_name_format(self, frame_or_series): + index = Index(["a", "b", "c"], name=0) + result = frame_or_series(list(range(3)), index=index) + assert "0" in repr(result) + + def test_str_to_bytes_raises(self): + # GH 26447 + index = Index([str(x) for x in range(10)]) + msg = "^'str' object cannot be interpreted as an integer$" + with pytest.raises(TypeError, match=msg): + bytes(index) + + @pytest.mark.filterwarnings("ignore:elementwise comparison failed:FutureWarning") + def test_index_with_tuple_bool(self): + # GH34123 + # TODO: also this op right now produces FutureWarning from numpy + # https://github.com/numpy/numpy/issues/11521 + idx = Index([("a", "b"), ("b", "c"), ("c", "a")]) + result = idx == ("c", "a") + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + +class TestIndexUtils: + @pytest.mark.parametrize( + "data, names, expected", + [ + ([[1, 2, 3]], None, Index([1, 2, 3])), + ([[1, 2, 3]], ["name"], Index([1, 2, 3], name="name")), + ( + [["a", "a"], ["c", "d"]], + None, + MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]]), + ), + ( + [["a", "a"], ["c", "d"]], + ["L1", "L2"], + MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]], names=["L1", "L2"]), + ), + ], + ) + def test_ensure_index_from_sequences(self, data, names, expected): + result = ensure_index_from_sequences(data, names) + tm.assert_index_equal(result, expected) + + def test_ensure_index_mixed_closed_intervals(self): + # GH27172 + intervals = [ + pd.Interval(0, 1, closed="left"), + pd.Interval(1, 2, closed="right"), + pd.Interval(2, 3, closed="neither"), + pd.Interval(3, 4, closed="both"), + ] + result = ensure_index(intervals) + expected = Index(intervals, dtype=object) + tm.assert_index_equal(result, expected) + + def test_ensure_index_uint64(self): + # with both 0 and a large-uint64, np.array will infer to float64 + # https://github.com/numpy/numpy/issues/19146 + # but a more accurate choice would be uint64 + values = [0, np.iinfo(np.uint64).max] + + result = ensure_index(values) + assert list(result) == values + + expected = Index(values, dtype="uint64") + tm.assert_index_equal(result, expected) + + def test_get_combined_index(self): + result = _get_combined_index([]) + expected = Index([]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "opname", + [ + "eq", + "ne", + "le", + "lt", + "ge", + "gt", + "add", + "radd", + "sub", + "rsub", + "mul", + "rmul", + "truediv", + "rtruediv", + "floordiv", + "rfloordiv", + "pow", + "rpow", + "mod", + "divmod", + ], +) +def test_generated_op_names(opname, index): + opname = f"__{opname}__" + method = getattr(index, opname) + assert method.__name__ == opname + + +@pytest.mark.parametrize( + "klass", + [ + partial(CategoricalIndex, data=[1]), + partial(DatetimeIndex, data=["2020-01-01"]), + partial(PeriodIndex, data=["2020-01-01"]), + partial(TimedeltaIndex, data=["1 day"]), + partial(RangeIndex, data=range(1)), + partial(IntervalIndex, data=[pd.Interval(0, 1)]), + partial(Index, data=["a"], dtype=object), + partial(MultiIndex, levels=[1], codes=[0]), + ], +) +def test_index_subclass_constructor_wrong_kwargs(klass): + # GH #19348 + with pytest.raises(TypeError, match="unexpected keyword argument"): + klass(foo="bar") + + +def 
test_deprecated_fastpath(): + msg = "[Uu]nexpected keyword argument" + with pytest.raises(TypeError, match=msg): + Index(np.array(["a", "b"], dtype=object), name="test", fastpath=True) + + with pytest.raises(TypeError, match=msg): + Index(np.array([1, 2, 3], dtype="int64"), name="test", fastpath=True) + + with pytest.raises(TypeError, match=msg): + RangeIndex(0, 5, 2, name="test", fastpath=True) + + with pytest.raises(TypeError, match=msg): + CategoricalIndex(["a", "b", "c"], name="test", fastpath=True) + + +def test_shape_of_invalid_index(): + # Pre-2.0, it was possible to create "invalid" index objects backed by + # a multi-dimensional array (see https://github.com/pandas-dev/pandas/issues/27125 + # about this). However, as long as this is not solved in general,this test ensures + # that the returned shape is consistent with this underlying array for + # compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775) + idx = Index([0, 1, 2, 3]) + with pytest.raises(ValueError, match="Multi-dimensional indexing"): + # GH#30588 multi-dimensional indexing deprecated + idx[:, None] + + +@pytest.mark.parametrize("dtype", [None, np.int64, np.uint64, np.float64]) +def test_validate_1d_input(dtype): + # GH#27125 check that we do not have >1-dimensional input + msg = "Index data must be 1-dimensional" + + arr = np.arange(8).reshape(2, 2, 2) + with pytest.raises(ValueError, match=msg): + Index(arr, dtype=dtype) + + df = DataFrame(arr.reshape(4, 2)) + with pytest.raises(ValueError, match=msg): + Index(df, dtype=dtype) + + # GH#13601 trying to assign a multi-dimensional array to an index is not allowed + ser = Series(0, range(4)) + with pytest.raises(ValueError, match=msg): + ser.index = np.array([[2, 3]] * 4, dtype=dtype) + + +@pytest.mark.parametrize( + "klass, extra_kwargs", + [ + [Index, {}], + *[[lambda x: Index(x, dtype=dtyp), {}] for dtyp in tm.ALL_REAL_NUMPY_DTYPES], + [DatetimeIndex, {}], + [TimedeltaIndex, {}], + [PeriodIndex, {"freq": "Y"}], + ], +) +def test_construct_from_memoryview(klass, extra_kwargs): + # GH 13120 + result = klass(memoryview(np.arange(2000, 2005)), **extra_kwargs) + expected = klass(list(range(2000, 2005)), **extra_kwargs) + tm.assert_index_equal(result, expected, exact=True) + + +@pytest.mark.parametrize("op", [operator.lt, operator.gt]) +def test_nan_comparison_same_object(op): + # GH#47105 + idx = Index([np.nan]) + expected = np.array([False]) + + result = op(idx, idx) + tm.assert_numpy_array_equal(result, expected) + + result = op(idx, idx.copy()) + tm.assert_numpy_array_equal(result, expected) + + +@td.skip_if_no("pyarrow") +def test_is_monotonic_pyarrow_list_type(): + # GH 57333 + import pyarrow as pa + + idx = Index([[1], [2, 3]], dtype=pd.ArrowDtype(pa.list_(pa.int64()))) + assert not idx.is_monotonic_increasing + assert not idx.is_monotonic_decreasing diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_common.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..80c39322b9b810a73efa2c2739d1928eb21ae711 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_common.py @@ -0,0 +1,511 @@ +""" +Collection of tests asserting things that should be true for +any index subclass except for MultiIndex. Makes use of the `index_flat` +fixture defined in pandas/conftest.py. 
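+
+Because `index_flat` is parametrized over the non-MultiIndex index types,
+each test below runs once per flavor (numeric, string, datetime-like,
+categorical, interval, range, ...), encoding invariants that every flat
+Index subclass is expected to satisfy.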
+""" +from copy import ( + copy, + deepcopy, +) +import re + +import numpy as np +import pytest + +from pandas.compat import IS64 +from pandas.compat.numpy import np_version_gte1p25 + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_numeric_dtype, +) + +import pandas as pd +from pandas import ( + CategoricalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, +) +import pandas._testing as tm + + +class TestCommon: + @pytest.mark.parametrize("name", [None, "new_name"]) + def test_to_frame(self, name, index_flat, using_copy_on_write): + # see GH#15230, GH#22580 + idx = index_flat + + if name: + idx_name = name + else: + idx_name = idx.name or 0 + + df = idx.to_frame(name=idx_name) + + assert df.index is idx + assert len(df.columns) == 1 + assert df.columns[0] == idx_name + if not using_copy_on_write: + assert df[idx_name].values is not idx.values + + df = idx.to_frame(index=False, name=idx_name) + assert df.index is not idx + + def test_droplevel(self, index_flat): + # GH 21115 + # MultiIndex is tested separately in test_multi.py + index = index_flat + + assert index.droplevel([]).equals(index) + + for level in [index.name, [index.name]]: + if isinstance(index.name, tuple) and level is index.name: + # GH 21121 : droplevel with tuple name + continue + msg = ( + "Cannot remove 1 levels from an index with 1 levels: at least one " + "level must be left." + ) + with pytest.raises(ValueError, match=msg): + index.droplevel(level) + + for level in "wrong", ["wrong"]: + with pytest.raises( + KeyError, + match=r"'Requested level \(wrong\) does not match index name \(None\)'", + ): + index.droplevel(level) + + def test_constructor_non_hashable_name(self, index_flat): + # GH 20527 + index = index_flat + + message = "Index.name must be a hashable type" + renamed = [["1"]] + + # With .rename() + with pytest.raises(TypeError, match=message): + index.rename(name=renamed) + + # With .set_names() + with pytest.raises(TypeError, match=message): + index.set_names(names=renamed) + + def test_constructor_unwraps_index(self, index_flat): + a = index_flat + # Passing dtype is necessary for Index([True, False], dtype=object) + # case. 
+ b = type(a)(a, dtype=a.dtype) + tm.assert_equal(a._data, b._data) + + def test_to_flat_index(self, index_flat): + # 22866 + index = index_flat + + result = index.to_flat_index() + tm.assert_index_equal(result, index) + + def test_set_name_methods(self, index_flat): + # MultiIndex tested separately + index = index_flat + new_name = "This is the new name for this index" + + original_name = index.name + new_ind = index.set_names([new_name]) + assert new_ind.name == new_name + assert index.name == original_name + res = index.rename(new_name, inplace=True) + + # should return None + assert res is None + assert index.name == new_name + assert index.names == [new_name] + with pytest.raises(ValueError, match="Level must be None"): + index.set_names("a", level=0) + + # rename in place just leaves tuples and other containers alone + name = ("A", "B") + index.rename(name, inplace=True) + assert index.name == name + assert index.names == [name] + + @pytest.mark.xfail + def test_set_names_single_label_no_level(self, index_flat): + with pytest.raises(TypeError, match="list-like"): + # should still fail even if it would be the right length + index_flat.set_names("a") + + def test_copy_and_deepcopy(self, index_flat): + index = index_flat + + for func in (copy, deepcopy): + idx_copy = func(index) + assert idx_copy is not index + assert idx_copy.equals(index) + + new_copy = index.copy(deep=True, name="banana") + assert new_copy.name == "banana" + + def test_copy_name(self, index_flat): + # GH#12309: Check that the "name" argument + # passed at initialization is honored. + index = index_flat + + first = type(index)(index, copy=True, name="mario") + second = type(first)(first, copy=False) + + # Even though "copy=False", we want a new object. + assert first is not second + tm.assert_index_equal(first, second) + + # Not using tm.assert_index_equal() since names differ. + assert index.equals(first) + + assert first.name == "mario" + assert second.name == "mario" + + # TODO: belongs in series arithmetic tests? + s1 = pd.Series(2, index=first) + s2 = pd.Series(3, index=second[:-1]) + # See GH#13365 + s3 = s1 * s2 + assert s3.index.name == "mario" + + def test_copy_name2(self, index_flat): + # GH#35592 + index = index_flat + + assert index.copy(name="mario").name == "mario" + + with pytest.raises(ValueError, match="Length of new names must be 1, got 2"): + index.copy(name=["mario", "luigi"]) + + msg = f"{type(index).__name__}.name must be a hashable type" + with pytest.raises(TypeError, match=msg): + index.copy(name=[["mario"]]) + + def test_unique_level(self, index_flat): + # don't test a MultiIndex here (as its tested separated) + index = index_flat + + # GH 17896 + expected = index.drop_duplicates() + for level in [0, index.name, None]: + result = index.unique(level=level) + tm.assert_index_equal(result, expected) + + msg = "Too many levels: Index has only 1 level, not 4" + with pytest.raises(IndexError, match=msg): + index.unique(level=3) + + msg = ( + rf"Requested level \(wrong\) does not match index name " + rf"\({re.escape(index.name.__repr__())}\)" + ) + with pytest.raises(KeyError, match=msg): + index.unique(level="wrong") + + def test_unique(self, index_flat): + # MultiIndex tested separately + index = index_flat + if not len(index): + pytest.skip("Skip check for empty Index and MultiIndex") + + idx = index[[0] * 5] + idx_unique = index[[0]] + + # We test against `idx_unique`, so first we make sure it's unique + # and doesn't contain nans. 
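+        # e.g. for index = Index([3, 1, 2]): idx is Index([3, 3, 3, 3, 3])
+        # and idx_unique is Index([3]), so idx.unique() must collapse the
+        # repeats back down to idx_unique.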
+ assert idx_unique.is_unique is True + try: + assert idx_unique.hasnans is False + except NotImplementedError: + pass + + result = idx.unique() + tm.assert_index_equal(result, idx_unique) + + # nans: + if not index._can_hold_na: + pytest.skip("Skip na-check if index cannot hold na") + + vals = index._values[[0] * 5] + vals[0] = np.nan + + vals_unique = vals[:2] + idx_nan = index._shallow_copy(vals) + idx_unique_nan = index._shallow_copy(vals_unique) + assert idx_unique_nan.is_unique is True + + assert idx_nan.dtype == index.dtype + assert idx_unique_nan.dtype == index.dtype + + expected = idx_unique_nan + for pos, i in enumerate([idx_nan, idx_unique_nan]): + result = i.unique() + tm.assert_index_equal(result, expected) + + @pytest.mark.filterwarnings("ignore:Period with BDay freq:FutureWarning") + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_searchsorted_monotonic(self, index_flat, request): + # GH17271 + index = index_flat + # not implemented for tuple searches in MultiIndex + # or Intervals searches in IntervalIndex + if isinstance(index, pd.IntervalIndex): + mark = pytest.mark.xfail( + reason="IntervalIndex.searchsorted does not support Interval arg", + raises=NotImplementedError, + ) + request.applymarker(mark) + + # nothing to test if the index is empty + if index.empty: + pytest.skip("Skip check for empty Index") + value = index[0] + + # determine the expected results (handle dupes for 'right') + expected_left, expected_right = 0, (index == value).argmin() + if expected_right == 0: + # all values are the same, expected_right should be length + expected_right = len(index) + + # test _searchsorted_monotonic in all cases + # test searchsorted only for increasing + if index.is_monotonic_increasing: + ssm_left = index._searchsorted_monotonic(value, side="left") + assert expected_left == ssm_left + + ssm_right = index._searchsorted_monotonic(value, side="right") + assert expected_right == ssm_right + + ss_left = index.searchsorted(value, side="left") + assert expected_left == ss_left + + ss_right = index.searchsorted(value, side="right") + assert expected_right == ss_right + + elif index.is_monotonic_decreasing: + ssm_left = index._searchsorted_monotonic(value, side="left") + assert expected_left == ssm_left + + ssm_right = index._searchsorted_monotonic(value, side="right") + assert expected_right == ssm_right + else: + # non-monotonic should raise. 
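+            # e.g. Index([1, 3, 2]) admits no well-defined binary-search
+            # insertion point, so the lookup is rejected outright.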
+ msg = "index must be monotonic increasing or decreasing" + with pytest.raises(ValueError, match=msg): + index._searchsorted_monotonic(value, side="left") + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_drop_duplicates(self, index_flat, keep): + # MultiIndex is tested separately + index = index_flat + if isinstance(index, RangeIndex): + pytest.skip( + "RangeIndex is tested in test_drop_duplicates_no_duplicates " + "as it cannot hold duplicates" + ) + if len(index) == 0: + pytest.skip( + "empty index is tested in test_drop_duplicates_no_duplicates " + "as it cannot hold duplicates" + ) + + # make unique index + holder = type(index) + unique_values = list(set(index)) + dtype = index.dtype if is_numeric_dtype(index) else None + unique_idx = holder(unique_values, dtype=dtype) + + # make duplicated index + n = len(unique_idx) + duplicated_selection = np.random.default_rng(2).choice(n, int(n * 1.5)) + idx = holder(unique_idx.values[duplicated_selection]) + + # Series.duplicated is tested separately + expected_duplicated = ( + pd.Series(duplicated_selection).duplicated(keep=keep).values + ) + tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated) + + # Series.drop_duplicates is tested separately + expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep)) + tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_drop_duplicates_no_duplicates(self, index_flat): + # MultiIndex is tested separately + index = index_flat + + # make unique index + if isinstance(index, RangeIndex): + # RangeIndex cannot have duplicates + unique_idx = index + else: + holder = type(index) + unique_values = list(set(index)) + dtype = index.dtype if is_numeric_dtype(index) else None + unique_idx = holder(unique_values, dtype=dtype) + + # check on unique index + expected_duplicated = np.array([False] * len(unique_idx), dtype="bool") + tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated) + result_dropped = unique_idx.drop_duplicates() + tm.assert_index_equal(result_dropped, unique_idx) + # validate shallow copy + assert result_dropped is not unique_idx + + def test_drop_duplicates_inplace(self, index): + msg = r"drop_duplicates\(\) got an unexpected keyword argument" + with pytest.raises(TypeError, match=msg): + index.drop_duplicates(inplace=True) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_has_duplicates(self, index_flat): + # MultiIndex tested separately in: + # tests/indexes/multi/test_unique_and_duplicates. + index = index_flat + holder = type(index) + if not len(index) or isinstance(index, RangeIndex): + # MultiIndex tested separately in: + # tests/indexes/multi/test_unique_and_duplicates. + # RangeIndex is unique by definition. 
+ pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex") + + idx = holder([index[0]] * 5) + assert idx.is_unique is False + assert idx.has_duplicates is True + + @pytest.mark.parametrize( + "dtype", + ["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"], + ) + def test_astype_preserves_name(self, index, dtype): + # https://github.com/pandas-dev/pandas/issues/32013 + if isinstance(index, MultiIndex): + index.names = ["idx" + str(i) for i in range(index.nlevels)] + else: + index.name = "idx" + + warn = None + if index.dtype.kind == "c" and dtype in ["float64", "int64", "uint64"]: + # imaginary components discarded + if np_version_gte1p25: + warn = np.exceptions.ComplexWarning + else: + warn = np.ComplexWarning + + is_pyarrow_str = str(index.dtype) == "string[pyarrow]" and dtype == "category" + try: + # Some of these conversions cannot succeed so we use a try / except + with tm.assert_produces_warning( + warn, + raise_on_extra_warnings=is_pyarrow_str, + check_stacklevel=False, + ): + result = index.astype(dtype) + except (ValueError, TypeError, NotImplementedError, SystemError): + return + + if isinstance(index, MultiIndex): + assert result.names == index.names + else: + assert result.name == index.name + + def test_hasnans_isnans(self, index_flat): + # GH#11343, added tests for hasnans / isnans + index = index_flat + + # cases in indices doesn't include NaN + idx = index.copy(deep=True) + expected = np.array([False] * len(idx), dtype=bool) + tm.assert_numpy_array_equal(idx._isnan, expected) + assert idx.hasnans is False + + idx = index.copy(deep=True) + values = idx._values + + if len(index) == 0: + return + elif is_integer_dtype(index.dtype): + return + elif index.dtype == bool: + # values[1] = np.nan below casts to True! + return + + values[1] = np.nan + + idx = type(index)(values) + + expected = np.array([False] * len(idx), dtype=bool) + expected[1] = True + tm.assert_numpy_array_equal(idx._isnan, expected) + assert idx.hasnans is True + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +@pytest.mark.parametrize("na_position", [None, "middle"]) +def test_sort_values_invalid_na_position(index_with_missing, na_position): + with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"): + index_with_missing.sort_values(na_position=na_position) + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +@pytest.mark.parametrize("na_position", ["first", "last"]) +def test_sort_values_with_missing(index_with_missing, na_position, request): + # GH 35584. Test that sort_values works with missing values, + # sort non-missing and place missing according to na_position + + if isinstance(index_with_missing, CategoricalIndex): + request.applymarker( + pytest.mark.xfail( + reason="missing value sorting order not well-defined", strict=False + ) + ) + + missing_count = np.sum(index_with_missing.isna()) + not_na_vals = index_with_missing[index_with_missing.notna()].values + sorted_values = np.sort(not_na_vals) + if na_position == "first": + sorted_values = np.concatenate([[None] * missing_count, sorted_values]) + else: + sorted_values = np.concatenate([sorted_values, [None] * missing_count]) + + # Explicitly pass dtype needed for Index backed by EA e.g. 
IntegerArray + expected = type(index_with_missing)(sorted_values, dtype=index_with_missing.dtype) + + result = index_with_missing.sort_values(na_position=na_position) + tm.assert_index_equal(result, expected) + + +def test_ndarray_compat_properties(index): + if isinstance(index, PeriodIndex) and not IS64: + pytest.skip("Overflow") + idx = index + assert idx.T.equals(idx) + assert idx.transpose().equals(idx) + + values = idx.values + + assert idx.shape == values.shape + assert idx.ndim == values.ndim + assert idx.size == values.size + + if not isinstance(index, (RangeIndex, MultiIndex)): + # These two are not backed by an ndarray + assert idx.nbytes == values.nbytes + + # test for validity + idx.nbytes + idx.values.nbytes + + +def test_compare_read_only_array(): + # GH#57130 + arr = np.array([], dtype=object) + arr.flags.writeable = False + idx = pd.Index(arr) + result = idx > 69 + assert result.dtype == bool diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_datetimelike.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_datetimelike.py new file mode 100644 index 0000000000000000000000000000000000000000..21a686e8bc05b09729c6fe54e67c96405ee36bca --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_datetimelike.py @@ -0,0 +1,171 @@ +""" generic datetimelike tests """ + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class TestDatetimeLike: + @pytest.fixture( + params=[ + pd.period_range("20130101", periods=5, freq="D"), + pd.TimedeltaIndex( + [ + "0 days 01:00:00", + "1 days 01:00:00", + "2 days 01:00:00", + "3 days 01:00:00", + "4 days 01:00:00", + ], + dtype="timedelta64[ns]", + freq="D", + ), + pd.DatetimeIndex( + ["2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05"], + dtype="datetime64[ns]", + freq="D", + ), + ] + ) + def simple_index(self, request): + return request.param + + def test_isin(self, simple_index): + index = simple_index[:4] + result = index.isin(index) + assert result.all() + + result = index.isin(list(index)) + assert result.all() + + result = index.isin([index[2], 5]) + expected = np.array([False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + def test_argsort_matches_array(self, simple_index): + idx = simple_index + idx = idx.insert(1, pd.NaT) + + result = idx.argsort() + expected = idx._data.argsort() + tm.assert_numpy_array_equal(result, expected) + + def test_can_hold_identifiers(self, simple_index): + idx = simple_index + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is False + + def test_shift_identity(self, simple_index): + idx = simple_index + tm.assert_index_equal(idx, idx.shift(0)) + + def test_shift_empty(self, simple_index): + # GH#14811 + idx = simple_index[:0] + tm.assert_index_equal(idx, idx.shift(1)) + + def test_str(self, simple_index): + # test the string repr + idx = simple_index.copy() + idx.name = "foo" + assert f"length={len(idx)}" not in str(idx) + assert "'foo'" in str(idx) + assert type(idx).__name__ in str(idx) + + if hasattr(idx, "tz"): + if idx.tz is not None: + assert idx.tz in str(idx) + if isinstance(idx, pd.PeriodIndex): + assert f"dtype='period[{idx.freqstr}]'" in str(idx) + else: + assert f"freq='{idx.freqstr}'" in str(idx) + + def test_view(self, simple_index): + idx = simple_index + + idx_view = idx.view("i8") + result = type(simple_index)(idx) + tm.assert_index_equal(result, idx) + + msg = "Passing a type in .*Index.view is deprecated" + with 
tm.assert_produces_warning(FutureWarning, match=msg): + idx_view = idx.view(type(simple_index)) + result = type(simple_index)(idx) + tm.assert_index_equal(result, idx_view) + + def test_map_callable(self, simple_index): + index = simple_index + expected = index + index.freq + result = index.map(lambda x: x + index.freq) + tm.assert_index_equal(result, expected) + + # map to NaT + result = index.map(lambda x: pd.NaT if x == index[0] else x) + expected = pd.Index([pd.NaT] + index[1:].tolist()) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "mapper", + [ + lambda values, index: {i: e for e, i in zip(values, index)}, + lambda values, index: pd.Series(values, index, dtype=object), + ], + ) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_map_dictlike(self, mapper, simple_index): + index = simple_index + expected = index + index.freq + + # don't compare the freqs + if isinstance(expected, (pd.DatetimeIndex, pd.TimedeltaIndex)): + expected = expected._with_freq(None) + + result = index.map(mapper(expected, index)) + tm.assert_index_equal(result, expected) + + expected = pd.Index([pd.NaT] + index[1:].tolist()) + result = index.map(mapper(expected, index)) + tm.assert_index_equal(result, expected) + + # empty map; these map to np.nan because we cannot know + # to re-infer things + expected = pd.Index([np.nan] * len(index)) + result = index.map(mapper([], [])) + tm.assert_index_equal(result, expected) + + def test_getitem_preserves_freq(self, simple_index): + index = simple_index + assert index.freq is not None + + result = index[:] + assert result.freq == index.freq + + def test_where_cast_str(self, simple_index): + index = simple_index + + mask = np.ones(len(index), dtype=bool) + mask[-1] = False + + result = index.where(mask, str(index[0])) + expected = index.where(mask, index[0]) + tm.assert_index_equal(result, expected) + + result = index.where(mask, [str(index[0])]) + tm.assert_index_equal(result, expected) + + expected = index.astype(object).where(mask, "foo") + result = index.where(mask, "foo") + tm.assert_index_equal(result, expected) + + result = index.where(mask, ["foo"]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_diff(self, unit): + # GH 55080 + dti = pd.to_datetime([10, 20, 30], unit=unit).as_unit(unit) + result = dti.diff(1) + expected = pd.to_timedelta([pd.NaT, 10, 10], unit=unit).as_unit(unit) + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_engines.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_engines.py new file mode 100644 index 0000000000000000000000000000000000000000..468c2240c8192098a6ff75a5a2d0210c8108a176 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_engines.py @@ -0,0 +1,192 @@ +import re + +import numpy as np +import pytest + +from pandas._libs import index as libindex + +import pandas as pd + + +@pytest.fixture( + params=[ + (libindex.Int64Engine, np.int64), + (libindex.Int32Engine, np.int32), + (libindex.Int16Engine, np.int16), + (libindex.Int8Engine, np.int8), + (libindex.UInt64Engine, np.uint64), + (libindex.UInt32Engine, np.uint32), + (libindex.UInt16Engine, np.uint16), + (libindex.UInt8Engine, np.uint8), + (libindex.Float64Engine, np.float64), + (libindex.Float32Engine, np.float32), + ], + ids=lambda x: x[0].__name__, +) +def numeric_indexing_engine_type_and_dtype(request): + return request.param + + +class 
TestDatetimeEngine: + @pytest.mark.parametrize( + "scalar", + [ + pd.Timedelta(pd.Timestamp("2016-01-01").asm8.view("m8[ns]")), + pd.Timestamp("2016-01-01")._value, + pd.Timestamp("2016-01-01").to_pydatetime(), + pd.Timestamp("2016-01-01").to_datetime64(), + ], + ) + def test_not_contains_requires_timestamp(self, scalar): + dti1 = pd.date_range("2016-01-01", periods=3) + dti2 = dti1.insert(1, pd.NaT) # non-monotonic + dti3 = dti1.insert(3, dti1[0]) # non-unique + dti4 = pd.date_range("2016-01-01", freq="ns", periods=2_000_000) + dti5 = dti4.insert(0, dti4[0]) # over size threshold, not unique + + msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))]) + for dti in [dti1, dti2, dti3, dti4, dti5]: + with pytest.raises(TypeError, match=msg): + scalar in dti._engine + + with pytest.raises(KeyError, match=msg): + dti._engine.get_loc(scalar) + + +class TestTimedeltaEngine: + @pytest.mark.parametrize( + "scalar", + [ + pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")), + pd.Timedelta(days=42)._value, + pd.Timedelta(days=42).to_pytimedelta(), + pd.Timedelta(days=42).to_timedelta64(), + ], + ) + def test_not_contains_requires_timedelta(self, scalar): + tdi1 = pd.timedelta_range("42 days", freq="9h", periods=1234) + tdi2 = tdi1.insert(1, pd.NaT) # non-monotonic + tdi3 = tdi1.insert(3, tdi1[0]) # non-unique + tdi4 = pd.timedelta_range("42 days", freq="ns", periods=2_000_000) + tdi5 = tdi4.insert(0, tdi4[0]) # over size threshold, not unique + + msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))]) + for tdi in [tdi1, tdi2, tdi3, tdi4, tdi5]: + with pytest.raises(TypeError, match=msg): + scalar in tdi._engine + + with pytest.raises(KeyError, match=msg): + tdi._engine.get_loc(scalar) + + +class TestNumericEngine: + def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype): + engine_type, dtype = numeric_indexing_engine_type_and_dtype + num = 1000 + arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype) + + # monotonic increasing + engine = engine_type(arr) + assert engine.is_monotonic_increasing is True + assert engine.is_monotonic_decreasing is False + + # monotonic decreasing + engine = engine_type(arr[::-1]) + assert engine.is_monotonic_increasing is False + assert engine.is_monotonic_decreasing is True + + # neither monotonic increasing or decreasing + arr = np.array([1] * num + [2] * num + [1] * num, dtype=dtype) + engine = engine_type(arr[::-1]) + assert engine.is_monotonic_increasing is False + assert engine.is_monotonic_decreasing is False + + def test_is_unique(self, numeric_indexing_engine_type_and_dtype): + engine_type, dtype = numeric_indexing_engine_type_and_dtype + + # unique + arr = np.array([1, 3, 2], dtype=dtype) + engine = engine_type(arr) + assert engine.is_unique is True + + # not unique + arr = np.array([1, 2, 1], dtype=dtype) + engine = engine_type(arr) + assert engine.is_unique is False + + def test_get_loc(self, numeric_indexing_engine_type_and_dtype): + engine_type, dtype = numeric_indexing_engine_type_and_dtype + + # unique + arr = np.array([1, 2, 3], dtype=dtype) + engine = engine_type(arr) + assert engine.get_loc(2) == 1 + + # monotonic + num = 1000 + arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype) + engine = engine_type(arr) + assert engine.get_loc(2) == slice(1000, 2000) + + # not monotonic + arr = np.array([1, 2, 3] * num, dtype=dtype) + engine = engine_type(arr) + expected = np.array([False, True, False] * num, dtype=bool) + result = engine.get_loc(2) + assert (result == expected).all() + + +class 
TestObjectEngine: + engine_type = libindex.ObjectEngine + dtype = np.object_ + values = list("abc") + + def test_is_monotonic(self): + num = 1000 + arr = np.array(["a"] * num + ["a"] * num + ["c"] * num, dtype=self.dtype) + + # monotonic increasing + engine = self.engine_type(arr) + assert engine.is_monotonic_increasing is True + assert engine.is_monotonic_decreasing is False + + # monotonic decreasing + engine = self.engine_type(arr[::-1]) + assert engine.is_monotonic_increasing is False + assert engine.is_monotonic_decreasing is True + + # neither monotonic increasing or decreasing + arr = np.array(["a"] * num + ["b"] * num + ["a"] * num, dtype=self.dtype) + engine = self.engine_type(arr[::-1]) + assert engine.is_monotonic_increasing is False + assert engine.is_monotonic_decreasing is False + + def test_is_unique(self): + # unique + arr = np.array(self.values, dtype=self.dtype) + engine = self.engine_type(arr) + assert engine.is_unique is True + + # not unique + arr = np.array(["a", "b", "a"], dtype=self.dtype) + engine = self.engine_type(arr) + assert engine.is_unique is False + + def test_get_loc(self): + # unique + arr = np.array(self.values, dtype=self.dtype) + engine = self.engine_type(arr) + assert engine.get_loc("b") == 1 + + # monotonic + num = 1000 + arr = np.array(["a"] * num + ["b"] * num + ["c"] * num, dtype=self.dtype) + engine = self.engine_type(arr) + assert engine.get_loc("b") == slice(1000, 2000) + + # not monotonic + arr = np.array(self.values * num, dtype=self.dtype) + engine = self.engine_type(arr) + expected = np.array([False, True, False] * num, dtype=bool) + result = engine.get_loc("b") + assert (result == expected).all() diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_frozen.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_frozen.py new file mode 100644 index 0000000000000000000000000000000000000000..ace66b5b06a51291d2cf229fdc446d070054836a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_frozen.py @@ -0,0 +1,113 @@ +import re + +import pytest + +from pandas.core.indexes.frozen import FrozenList + + +@pytest.fixture +def lst(): + return [1, 2, 3, 4, 5] + + +@pytest.fixture +def container(lst): + return FrozenList(lst) + + +@pytest.fixture +def unicode_container(): + return FrozenList(["\u05d0", "\u05d1", "c"]) + + +class TestFrozenList: + def check_mutable_error(self, *args, **kwargs): + # Pass whatever function you normally would to pytest.raises + # (after the Exception kind). 
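+        # NB: the compiled pattern itself gets "called" below; calling a
+        # re.Pattern raises TypeError, which is what pytest.raises matches,
+        # while the pattern text documents the message a real mutating call
+        # would produce.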
+ mutable_regex = re.compile("does not support mutable operations") + msg = "'(_s)?re.(SRE_)?Pattern' object is not callable" + with pytest.raises(TypeError, match=msg): + mutable_regex(*args, **kwargs) + + def test_no_mutable_funcs(self, container): + def setitem(): + container[0] = 5 + + self.check_mutable_error(setitem) + + def setslice(): + container[1:2] = 3 + + self.check_mutable_error(setslice) + + def delitem(): + del container[0] + + self.check_mutable_error(delitem) + + def delslice(): + del container[0:3] + + self.check_mutable_error(delslice) + + mutable_methods = ("extend", "pop", "remove", "insert") + + for meth in mutable_methods: + self.check_mutable_error(getattr(container, meth)) + + def test_slicing_maintains_type(self, container, lst): + result = container[1:2] + expected = lst[1:2] + self.check_result(result, expected) + + def check_result(self, result, expected): + assert isinstance(result, FrozenList) + assert result == expected + + def test_string_methods_dont_fail(self, container): + repr(container) + str(container) + bytes(container) + + def test_tricky_container(self, unicode_container): + repr(unicode_container) + str(unicode_container) + + def test_add(self, container, lst): + result = container + (1, 2, 3) + expected = FrozenList(lst + [1, 2, 3]) + self.check_result(result, expected) + + result = (1, 2, 3) + container + expected = FrozenList([1, 2, 3] + lst) + self.check_result(result, expected) + + def test_iadd(self, container, lst): + q = r = container + + q += [5] + self.check_result(q, lst + [5]) + + # Other shouldn't be mutated. + self.check_result(r, lst) + + def test_union(self, container, lst): + result = container.union((1, 2, 3)) + expected = FrozenList(lst + [1, 2, 3]) + self.check_result(result, expected) + + def test_difference(self, container): + result = container.difference([2]) + expected = FrozenList([1, 3, 4, 5]) + self.check_result(result, expected) + + def test_difference_dupe(self): + result = FrozenList([1, 2, 3, 2]).difference([2]) + expected = FrozenList([1, 3]) + self.check_result(result, expected) + + def test_tricky_container_to_bytes_raises(self, unicode_container): + # GH 26447 + msg = "^'str' object cannot be interpreted as an integer$" + with pytest.raises(TypeError, match=msg): + bytes(unicode_container) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_index_new.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_index_new.py new file mode 100644 index 0000000000000000000000000000000000000000..6042e5b9cc6793018ccf26f37aec236dfa353393 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_index_new.py @@ -0,0 +1,432 @@ +""" +Tests for the Index constructor conducting inference. 
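+
+"Inference" here covers two questions: which dtype and Index subclass
+Index(...) picks when no dtype is given, and how an explicit dtype argument
+overrides that choice (see TestDtypeEnforced below).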
+""" +from datetime import ( + datetime, + timedelta, + timezone, +) +from decimal import Decimal + +import numpy as np +import pytest + +from pandas._libs.tslibs.timezones import maybe_get_tz + +from pandas import ( + NA, + Categorical, + CategoricalIndex, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + NaT, + PeriodIndex, + Series, + TimedeltaIndex, + Timestamp, + array, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestIndexConstructorInference: + def test_object_all_bools(self): + # GH#49594 match Series behavior on ndarray[object] of all bools + arr = np.array([True, False], dtype=object) + res = Index(arr) + assert res.dtype == object + + # since the point is matching Series behavior, let's double check + assert Series(arr).dtype == object + + def test_object_all_complex(self): + # GH#49594 match Series behavior on ndarray[object] of all complex + arr = np.array([complex(1), complex(2)], dtype=object) + res = Index(arr) + assert res.dtype == object + + # since the point is matching Series behavior, let's double check + assert Series(arr).dtype == object + + @pytest.mark.parametrize("val", [NaT, None, np.nan, float("nan")]) + def test_infer_nat(self, val): + # GH#49340 all NaT/None/nan and at least 1 NaT -> datetime64[ns], + # matching Series behavior + values = [NaT, val] + + idx = Index(values) + assert idx.dtype == "datetime64[ns]" and idx.isna().all() + + idx = Index(values[::-1]) + assert idx.dtype == "datetime64[ns]" and idx.isna().all() + + idx = Index(np.array(values, dtype=object)) + assert idx.dtype == "datetime64[ns]" and idx.isna().all() + + idx = Index(np.array(values, dtype=object)[::-1]) + assert idx.dtype == "datetime64[ns]" and idx.isna().all() + + @pytest.mark.parametrize("na_value", [None, np.nan]) + @pytest.mark.parametrize("vtype", [list, tuple, iter]) + def test_construction_list_tuples_nan(self, na_value, vtype): + # GH#18505 : valid tuples containing NaN + values = [(1, "two"), (3.0, na_value)] + result = Index(vtype(values)) + expected = MultiIndex.from_tuples(values) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [int, "int64", "int32", "int16", "int8", "uint64", "uint32", "uint16", "uint8"], + ) + def test_constructor_int_dtype_float(self, dtype): + # GH#18400 + expected = Index([0, 1, 2, 3], dtype=dtype) + result = Index([0.0, 1.0, 2.0, 3.0], dtype=dtype) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", [[True, False, True], np.array([True, False, True], dtype=bool)] + ) + def test_constructor_dtypes_to_object(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=bool) + else: + index = Index(vals) + + assert type(index) is Index + assert index.dtype == bool + + def test_constructor_categorical_to_object(self): + # GH#32167 Categorical data and dtype=object should return object-dtype + ci = CategoricalIndex(range(5)) + result = Index(ci, dtype=object) + assert not isinstance(result, CategoricalIndex) + + def test_constructor_infer_periodindex(self): + xp = period_range("2012-1-1", freq="M", periods=3) + rs = Index(xp) + tm.assert_index_equal(rs, xp) + assert isinstance(rs, PeriodIndex) + + def test_from_list_of_periods(self): + rng = period_range("1/1/2000", periods=20, freq="D") + periods = list(rng) + + result = Index(periods) + assert isinstance(result, PeriodIndex) + + @pytest.mark.parametrize("pos", [0, 1]) + @pytest.mark.parametrize( + 
"klass,dtype,ctor", + [ + (DatetimeIndex, "datetime64[ns]", np.datetime64("nat")), + (TimedeltaIndex, "timedelta64[ns]", np.timedelta64("nat")), + ], + ) + def test_constructor_infer_nat_dt_like( + self, pos, klass, dtype, ctor, nulls_fixture, request + ): + if isinstance(nulls_fixture, Decimal): + # We dont cast these to datetime64/timedelta64 + pytest.skip( + f"We don't cast {type(nulls_fixture).__name__} to " + "datetime64/timedelta64" + ) + + expected = klass([NaT, NaT]) + assert expected.dtype == dtype + data = [ctor] + data.insert(pos, nulls_fixture) + + warn = None + if nulls_fixture is NA: + expected = Index([NA, NaT]) + mark = pytest.mark.xfail(reason="Broken with np.NaT ctor; see GH 31884") + request.applymarker(mark) + # GH#35942 numpy will emit a DeprecationWarning within the + # assert_index_equal calls. Since we can't do anything + # about it until GH#31884 is fixed, we suppress that warning. + warn = DeprecationWarning + + result = Index(data) + + with tm.assert_produces_warning(warn): + tm.assert_index_equal(result, expected) + + result = Index(np.array(data, dtype=object)) + + with tm.assert_produces_warning(warn): + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("swap_objs", [True, False]) + def test_constructor_mixed_nat_objs_infers_object(self, swap_objs): + # mixed np.datetime64/timedelta64 nat results in object + data = [np.datetime64("nat"), np.timedelta64("nat")] + if swap_objs: + data = data[::-1] + + expected = Index(data, dtype=object) + tm.assert_index_equal(Index(data), expected) + tm.assert_index_equal(Index(np.array(data, dtype=object)), expected) + + @pytest.mark.parametrize("swap_objs", [True, False]) + def test_constructor_datetime_and_datetime64(self, swap_objs): + data = [Timestamp(2021, 6, 8, 9, 42), np.datetime64("now")] + if swap_objs: + data = data[::-1] + expected = DatetimeIndex(data) + + tm.assert_index_equal(Index(data), expected) + tm.assert_index_equal(Index(np.array(data, dtype=object)), expected) + + def test_constructor_datetimes_mixed_tzs(self): + # https://github.com/pandas-dev/pandas/pull/55793/files#r1383719998 + tz = maybe_get_tz("US/Central") + dt1 = datetime(2020, 1, 1, tzinfo=tz) + dt2 = datetime(2020, 1, 1, tzinfo=timezone.utc) + result = Index([dt1, dt2]) + expected = Index([dt1, dt2], dtype=object) + tm.assert_index_equal(result, expected) + + +class TestDtypeEnforced: + # check we don't silently ignore the dtype keyword + + def test_constructor_object_dtype_with_ea_data(self, any_numeric_ea_dtype): + # GH#45206 + arr = array([0], dtype=any_numeric_ea_dtype) + + idx = Index(arr, dtype=object) + assert idx.dtype == object + + @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"]) + def test_constructor_range_values_mismatched_dtype(self, dtype): + rng = Index(range(5)) + + result = Index(rng, dtype=dtype) + assert result.dtype == dtype + + result = Index(range(5), dtype=dtype) + assert result.dtype == dtype + + @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"]) + def test_constructor_categorical_values_mismatched_non_ea_dtype(self, dtype): + cat = Categorical([1, 2, 3]) + + result = Index(cat, dtype=dtype) + assert result.dtype == dtype + + def test_constructor_categorical_values_mismatched_dtype(self): + dti = date_range("2016-01-01", periods=3) + cat = Categorical(dti) + result = Index(cat, dti.dtype) + tm.assert_index_equal(result, dti) + + dti2 = dti.tz_localize("Asia/Tokyo") + cat2 = Categorical(dti2) + result = Index(cat2, dti2.dtype) + 
tm.assert_index_equal(result, dti2) + + ii = IntervalIndex.from_breaks(range(5)) + cat3 = Categorical(ii) + result = Index(cat3, dtype=ii.dtype) + tm.assert_index_equal(result, ii) + + def test_constructor_ea_values_mismatched_categorical_dtype(self): + dti = date_range("2016-01-01", periods=3) + result = Index(dti, dtype="category") + expected = CategoricalIndex(dti) + tm.assert_index_equal(result, expected) + + dti2 = date_range("2016-01-01", periods=3, tz="US/Pacific") + result = Index(dti2, dtype="category") + expected = CategoricalIndex(dti2) + tm.assert_index_equal(result, expected) + + def test_constructor_period_values_mismatched_dtype(self): + pi = period_range("2016-01-01", periods=3, freq="D") + result = Index(pi, dtype="category") + expected = CategoricalIndex(pi) + tm.assert_index_equal(result, expected) + + def test_constructor_timedelta64_values_mismatched_dtype(self): + # check we don't silently ignore the dtype keyword + tdi = timedelta_range("4 Days", periods=5) + result = Index(tdi, dtype="category") + expected = CategoricalIndex(tdi) + tm.assert_index_equal(result, expected) + + def test_constructor_interval_values_mismatched_dtype(self): + dti = date_range("2016-01-01", periods=3) + ii = IntervalIndex.from_breaks(dti) + result = Index(ii, dtype="category") + expected = CategoricalIndex(ii) + tm.assert_index_equal(result, expected) + + def test_constructor_datetime64_values_mismatched_period_dtype(self): + dti = date_range("2016-01-01", periods=3) + result = Index(dti, dtype="Period[D]") + expected = dti.to_period("D") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["int64", "uint64"]) + def test_constructor_int_dtype_nan_raises(self, dtype): + # see GH#15187 + data = [np.nan] + msg = "cannot convert" + with pytest.raises(ValueError, match=msg): + Index(data, dtype=dtype) + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + np.array([1, 2, 3]), + np.array([1, 2, 3], dtype=int), + # below should coerce + [1.0, 2.0, 3.0], + np.array([1.0, 2.0, 3.0], dtype=float), + ], + ) + def test_constructor_dtypes_to_int(self, vals, any_int_numpy_dtype): + dtype = any_int_numpy_dtype + index = Index(vals, dtype=dtype) + assert index.dtype == dtype + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + [1.0, 2.0, 3.0], + np.array([1.0, 2.0, 3.0]), + np.array([1, 2, 3], dtype=int), + np.array([1.0, 2.0, 3.0], dtype=float), + ], + ) + def test_constructor_dtypes_to_float(self, vals, float_numpy_dtype): + dtype = float_numpy_dtype + index = Index(vals, dtype=dtype) + assert index.dtype == dtype + + @pytest.mark.parametrize( + "vals", + [ + [1, 2, 3], + np.array([1, 2, 3], dtype=int), + np.array(["2011-01-01", "2011-01-02"], dtype="datetime64[ns]"), + [datetime(2011, 1, 1), datetime(2011, 1, 2)], + ], + ) + def test_constructor_dtypes_to_categorical(self, vals): + index = Index(vals, dtype="category") + assert isinstance(index, CategoricalIndex) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", + [ + Index(np.array([np.datetime64("2011-01-01"), np.datetime64("2011-01-02")])), + Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]), + ], + ) + def test_constructor_dtypes_to_datetime(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=object) + assert isinstance(index, Index) + assert index.dtype == object + else: + index = Index(vals) + assert isinstance(index, DatetimeIndex) + + @pytest.mark.parametrize("cast_index", [True, False]) + @pytest.mark.parametrize( + "vals", + [ + 
np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]), + [timedelta(1), timedelta(1)], + ], + ) + def test_constructor_dtypes_to_timedelta(self, cast_index, vals): + if cast_index: + index = Index(vals, dtype=object) + assert isinstance(index, Index) + assert index.dtype == object + else: + index = Index(vals) + assert isinstance(index, TimedeltaIndex) + + def test_pass_timedeltaindex_to_index(self): + rng = timedelta_range("1 days", "10 days") + idx = Index(rng, dtype=object) + + expected = Index(rng.to_pytimedelta(), dtype=object) + + tm.assert_numpy_array_equal(idx.values, expected.values) + + def test_pass_datetimeindex_to_index(self): + # GH#1396 + rng = date_range("1/1/2000", "3/1/2000") + idx = Index(rng, dtype=object) + + expected = Index(rng.to_pydatetime(), dtype=object) + + tm.assert_numpy_array_equal(idx.values, expected.values) + + +class TestIndexConstructorUnwrapping: + # Test passing different arraylike values to pd.Index + + @pytest.mark.parametrize("klass", [Index, DatetimeIndex]) + def test_constructor_from_series_dt64(self, klass): + stamps = [Timestamp("20110101"), Timestamp("20120101"), Timestamp("20130101")] + expected = DatetimeIndex(stamps) + ser = Series(stamps) + result = klass(ser) + tm.assert_index_equal(result, expected) + + def test_constructor_no_pandas_array(self): + ser = Series([1, 2, 3]) + result = Index(ser.array) + expected = Index([1, 2, 3]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "array", + [ + np.arange(5), + np.array(["a", "b", "c"]), + date_range("2000-01-01", periods=3).values, + ], + ) + def test_constructor_ndarray_like(self, array): + # GH#5460#issuecomment-44474502 + # it should be possible to convert any object that satisfies the numpy + # ndarray interface directly into an Index + class ArrayLike: + def __init__(self, array) -> None: + self.array = array + + def __array__(self, dtype=None, copy=None) -> np.ndarray: + return self.array + + expected = Index(array) + result = Index(ArrayLike(array)) + tm.assert_index_equal(result, expected) + + +class TestIndexConstructionErrors: + def test_constructor_overflow_int64(self): + # see GH#15832 + msg = ( + "The elements provided in the data cannot " + "all be casted to the dtype int64" + ) + with pytest.raises(OverflowError, match=msg): + Index([np.iinfo(np.uint64).max - 1], dtype="int64") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_indexing.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..1ea47f636ac9b64346b21496fe25d4fe109cd711 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_indexing.py @@ -0,0 +1,357 @@ +""" +test_indexing tests the following Index methods: + __getitem__ + get_loc + get_value + __contains__ + take + where + get_indexer + get_indexer_for + slice_locs + asof_locs + +The corresponding tests.indexes.[index_type].test_indexing files +contain tests for the corresponding methods specific to those Index subclasses. 
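+
+A quick sketch of two of the methods under test::
+
+    >>> idx = Index([1, 2, 3])
+    >>> idx.get_loc(2)
+    1
+    >>> 2 in idx
+    True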
+""" +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +from pandas.core.dtypes.common import ( + is_float_dtype, + is_scalar, +) + +from pandas import ( + NA, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + NaT, + PeriodIndex, + TimedeltaIndex, +) +import pandas._testing as tm + + +class TestTake: + def test_take_invalid_kwargs(self, index): + indices = [1, 2] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + index.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + index.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + index.take(indices, mode="clip") + + def test_take(self, index): + indexer = [4, 3, 0, 2] + if len(index) < 5: + pytest.skip("Test doesn't make sense since not enough elements") + + result = index.take(indexer) + expected = index[indexer] + assert result.equals(expected) + + if not isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): + # GH 10791 + msg = r"'(.*Index)' object has no attribute 'freq'" + with pytest.raises(AttributeError, match=msg): + index.freq + + def test_take_indexer_type(self): + # GH#42875 + integer_index = Index([0, 1, 2, 3]) + scalar_index = 1 + msg = "Expected indices to be array-like" + with pytest.raises(TypeError, match=msg): + integer_index.take(scalar_index) + + def test_take_minus1_without_fill(self, index): + # -1 does not get treated as NA unless allow_fill=True is passed + if len(index) == 0: + # Test is not applicable + pytest.skip("Test doesn't make sense for empty index") + + result = index.take([0, 0, -1]) + + expected = index.take([0, 0, len(index) - 1]) + tm.assert_index_equal(result, expected) + + +class TestContains: + @pytest.mark.parametrize( + "index,val", + [ + (Index([0, 1, 2]), 2), + (Index([0, 1, "2"]), "2"), + (Index([0, 1, 2, np.inf, 4]), 4), + (Index([0, 1, 2, np.nan, 4]), 4), + (Index([0, 1, 2, np.inf]), np.inf), + (Index([0, 1, 2, np.nan]), np.nan), + ], + ) + def test_index_contains(self, index, val): + assert val in index + + @pytest.mark.parametrize( + "index,val", + [ + (Index([0, 1, 2]), "2"), + (Index([0, 1, "2"]), 2), + (Index([0, 1, 2, np.inf]), 4), + (Index([0, 1, 2, np.nan]), 4), + (Index([0, 1, 2, np.inf]), np.nan), + (Index([0, 1, 2, np.nan]), np.inf), + # Checking if np.inf in int64 Index should not cause an OverflowError + # Related to GH 16957 + (Index([0, 1, 2], dtype=np.int64), np.inf), + (Index([0, 1, 2], dtype=np.int64), np.nan), + (Index([0, 1, 2], dtype=np.uint64), np.inf), + (Index([0, 1, 2], dtype=np.uint64), np.nan), + ], + ) + def test_index_not_contains(self, index, val): + assert val not in index + + @pytest.mark.parametrize( + "index,val", [(Index([0, 1, "2"]), 0), (Index([0, 1, "2"]), "2")] + ) + def test_mixed_index_contains(self, index, val): + # GH#19860 + assert val in index + + @pytest.mark.parametrize( + "index,val", [(Index([0, 1, "2"]), "1"), (Index([0, 1, "2"]), 2)] + ) + def test_mixed_index_not_contains(self, index, val): + # GH#19860 + assert val not in index + + def test_contains_with_float_index(self, any_real_numpy_dtype): + # GH#22085 + dtype = any_real_numpy_dtype + data = [0, 1, 2, 3] if not is_float_dtype(dtype) else [0.1, 1.1, 2.2, 3.3] + index = Index(data, dtype=dtype) + + if not is_float_dtype(index.dtype): + assert 1.1 not in index + assert 1.0 in index + assert 1 in index + else: + assert 1.1 in index + assert 1.0 
not in index + assert 1 not in index + + def test_contains_requires_hashable_raises(self, index): + if isinstance(index, MultiIndex): + return # TODO: do we want this to raise? + + msg = "unhashable type: 'list'" + with pytest.raises(TypeError, match=msg): + [] in index + + msg = "|".join( + [ + r"unhashable type: 'dict'", + r"must be real number, not dict", + r"an integer is required", + r"\{\}", + r"pandas\._libs\.interval\.IntervalTree' is not iterable", + ] + ) + with pytest.raises(TypeError, match=msg): + {} in index._engine + + +class TestGetLoc: + def test_get_loc_non_hashable(self, index): + with pytest.raises(InvalidIndexError, match="[0, 1]"): + index.get_loc([0, 1]) + + def test_get_loc_non_scalar_hashable(self, index): + # GH52877 + from enum import Enum + + class E(Enum): + X1 = "x1" + + assert not is_scalar(E.X1) + + exc = KeyError + msg = "" + if isinstance( + index, + ( + DatetimeIndex, + TimedeltaIndex, + PeriodIndex, + IntervalIndex, + ), + ): + # TODO: make these more consistent? + exc = InvalidIndexError + msg = "E.X1" + with pytest.raises(exc, match=msg): + index.get_loc(E.X1) + + def test_get_loc_generator(self, index): + exc = KeyError + if isinstance( + index, + ( + DatetimeIndex, + TimedeltaIndex, + PeriodIndex, + IntervalIndex, + MultiIndex, + ), + ): + # TODO: make these more consistent? + exc = InvalidIndexError + with pytest.raises(exc, match="generator object"): + # MultiIndex specifically checks for generator; others for scalar + index.get_loc(x for x in range(5)) + + def test_get_loc_masked_duplicated_na(self): + # GH#48411 + idx = Index([1, 2, NA, NA], dtype="Int64") + result = idx.get_loc(NA) + expected = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + +class TestGetIndexer: + def test_get_indexer_base(self, index): + if index._index_as_unique: + expected = np.arange(index.size, dtype=np.intp) + actual = index.get_indexer(index) + tm.assert_numpy_array_equal(expected, actual) + else: + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + index.get_indexer(index) + + with pytest.raises(ValueError, match="Invalid fill method"): + index.get_indexer(index, method="invalid") + + def test_get_indexer_consistency(self, index): + # See GH#16819 + + if index._index_as_unique: + indexer = index.get_indexer(index[0:2]) + assert isinstance(indexer, np.ndarray) + assert indexer.dtype == np.intp + else: + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + index.get_indexer(index[0:2]) + + indexer, _ = index.get_indexer_non_unique(index[0:2]) + assert isinstance(indexer, np.ndarray) + assert indexer.dtype == np.intp + + def test_get_indexer_masked_duplicated_na(self): + # GH#48411 + idx = Index([1, 2, NA, NA], dtype="Int64") + result = idx.get_indexer_for(Index([1, NA], dtype="Int64")) + expected = np.array([0, 2, 3], dtype=result.dtype) + tm.assert_numpy_array_equal(result, expected) + + +class TestConvertSliceIndexer: + def test_convert_almost_null_slice(self, index): + # slice with None at both ends, but not step + + key = slice(None, None, "foo") + + if isinstance(index, IntervalIndex): + msg = "label-based slicing with step!=1 is not supported for IntervalIndex" + with pytest.raises(ValueError, match=msg): + index._convert_slice_indexer(key, "loc") + else: + msg = "'>=' not supported between instances of 'str' and 'int'" + with pytest.raises(TypeError, match=msg): + 
index._convert_slice_indexer(key, "loc") + + +class TestPutmask: + def test_putmask_with_wrong_mask(self, index): + # GH#18368 + if not len(index): + pytest.skip("Test doesn't make sense for empty index") + + fill = index[0] + + msg = "putmask: mask and data must be the same size" + with pytest.raises(ValueError, match=msg): + index.putmask(np.ones(len(index) + 1, np.bool_), fill) + + with pytest.raises(ValueError, match=msg): + index.putmask(np.ones(len(index) - 1, np.bool_), fill) + + with pytest.raises(ValueError, match=msg): + index.putmask("foo", fill) + + +@pytest.mark.parametrize( + "idx", [Index([1, 2, 3]), Index([0.1, 0.2, 0.3]), Index(["a", "b", "c"])] +) +def test_getitem_deprecated_float(idx): + # https://github.com/pandas-dev/pandas/issues/34191 + + msg = "Indexing with a float is no longer supported" + with pytest.raises(IndexError, match=msg): + idx[1.0] + + +@pytest.mark.parametrize( + "idx,target,expected", + [ + ([np.nan, "var1", np.nan], [np.nan], np.array([0, 2], dtype=np.intp)), + ( + [np.nan, "var1", np.nan], + [np.nan, "var1"], + np.array([0, 2, 1], dtype=np.intp), + ), + ( + np.array([np.nan, "var1", np.nan], dtype=object), + [np.nan], + np.array([0, 2], dtype=np.intp), + ), + ( + DatetimeIndex(["2020-08-05", NaT, NaT]), + [NaT], + np.array([1, 2], dtype=np.intp), + ), + (["a", "b", "a", np.nan], [np.nan], np.array([3], dtype=np.intp)), + ( + np.array(["b", np.nan, float("NaN"), "b"], dtype=object), + Index([np.nan], dtype=object), + np.array([1, 2], dtype=np.intp), + ), + ], +) +def test_get_indexer_non_unique_multiple_nans(idx, target, expected): + # GH 35392 + axis = Index(idx) + actual = axis.get_indexer_for(target) + tm.assert_numpy_array_equal(actual, expected) + + +def test_get_indexer_non_unique_nans_in_object_dtype_target(nulls_fixture): + idx = Index([1.0, 2.0]) + target = Index([1, nulls_fixture], dtype="object") + + result_idx, result_missing = idx.get_indexer_non_unique(target) + tm.assert_numpy_array_equal(result_idx, np.array([0, -1], dtype=np.intp)) + tm.assert_numpy_array_equal(result_missing, np.array([1], dtype=np.intp)) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_numpy_compat.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_numpy_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..ace78d77350cbdc4ca3aa837720767a965443051 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_numpy_compat.py @@ -0,0 +1,189 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalIndex, + DatetimeIndex, + Index, + PeriodIndex, + TimedeltaIndex, + isna, +) +import pandas._testing as tm +from pandas.api.types import ( + is_complex_dtype, + is_numeric_dtype, +) +from pandas.core.arrays import BooleanArray +from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin + + +def test_numpy_ufuncs_out(index): + result = index == index + + out = np.empty(index.shape, dtype=bool) + np.equal(index, index, out=out) + tm.assert_numpy_array_equal(out, result) + + if not index._is_multi: + # same thing on the ExtensionArray + out = np.empty(index.shape, dtype=bool) + np.equal(index.array, index.array, out=out) + tm.assert_numpy_array_equal(out, result) + + +@pytest.mark.parametrize( + "func", + [ + np.exp, + np.exp2, + np.expm1, + np.log, + np.log2, + np.log10, + np.log1p, + np.sqrt, + np.sin, + np.cos, + np.tan, + np.arcsin, + np.arccos, + np.arctan, + np.sinh, + np.cosh, + np.tanh, + np.arcsinh, + np.arccosh, + np.arctanh, + np.deg2rad, + np.rad2deg, + ], + 
ids=lambda x: x.__name__, +) +def test_numpy_ufuncs_basic(index, func): + # test ufuncs of numpy, see: + # https://numpy.org/doc/stable/reference/ufuncs.html + + if isinstance(index, DatetimeIndexOpsMixin): + with tm.external_error_raised((TypeError, AttributeError)): + with np.errstate(all="ignore"): + func(index) + elif is_numeric_dtype(index) and not ( + is_complex_dtype(index) and func in [np.deg2rad, np.rad2deg] + ): + # coerces to float (e.g. np.sin) + with np.errstate(all="ignore"): + result = func(index) + arr_result = func(index.values) + if arr_result.dtype == np.float16: + arr_result = arr_result.astype(np.float32) + exp = Index(arr_result, name=index.name) + + tm.assert_index_equal(result, exp) + if isinstance(index.dtype, np.dtype) and is_numeric_dtype(index): + if is_complex_dtype(index): + assert result.dtype == index.dtype + elif index.dtype in ["bool", "int8", "uint8"]: + assert result.dtype in ["float16", "float32"] + elif index.dtype in ["int16", "uint16", "float32"]: + assert result.dtype == "float32" + else: + assert result.dtype == "float64" + else: + # e.g. np.exp with Int64 -> Float64 + assert type(result) is Index + # raise AttributeError or TypeError + elif len(index) == 0: + pass + else: + with tm.external_error_raised((TypeError, AttributeError)): + with np.errstate(all="ignore"): + func(index) + + +@pytest.mark.parametrize( + "func", [np.isfinite, np.isinf, np.isnan, np.signbit], ids=lambda x: x.__name__ +) +def test_numpy_ufuncs_other(index, func): + # test ufuncs of numpy, see: + # https://numpy.org/doc/stable/reference/ufuncs.html + if isinstance(index, (DatetimeIndex, TimedeltaIndex)): + if func in (np.isfinite, np.isinf, np.isnan): + # numpy 1.18 changed isinf and isnan to not raise on dt64/td64 + result = func(index) + assert isinstance(result, np.ndarray) + + out = np.empty(index.shape, dtype=bool) + func(index, out=out) + tm.assert_numpy_array_equal(out, result) + else: + with tm.external_error_raised(TypeError): + func(index) + + elif isinstance(index, PeriodIndex): + with tm.external_error_raised(TypeError): + func(index) + + elif is_numeric_dtype(index) and not ( + is_complex_dtype(index) and func is np.signbit + ): + # Results in bool array + result = func(index) + if not isinstance(index.dtype, np.dtype): + # e.g. Int64 we expect to get BooleanArray back + assert isinstance(result, BooleanArray) + else: + assert isinstance(result, np.ndarray) + + out = np.empty(index.shape, dtype=bool) + func(index, out=out) + + if not isinstance(index.dtype, np.dtype): + tm.assert_numpy_array_equal(out, result._data) + else: + tm.assert_numpy_array_equal(out, result) + + elif len(index) == 0: + pass + else: + with tm.external_error_raised(TypeError): + func(index) + + +@pytest.mark.parametrize("func", [np.maximum, np.minimum]) +def test_numpy_ufuncs_reductions(index, func, request): + # TODO: overlap with tests.series.test_ufunc.test_reductions + if len(index) == 0: + pytest.skip("Test doesn't make sense for empty index.") + + if isinstance(index, CategoricalIndex) and index.dtype.ordered is False: + with pytest.raises(TypeError, match="is not ordered for"): + func.reduce(index) + return + else: + result = func.reduce(index) + + if func is np.maximum: + expected = index.max(skipna=False) + else: + expected = index.min(skipna=False) + # TODO: do we have cases both with and without NAs? 
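+    # NB: func.reduce applies the binary ufunc cumulatively over the index, so
+    # np.maximum.reduce should agree with index.max(skipna=False) and
+    # np.minimum.reduce with index.min(skipna=False), as asserted below.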
+ + assert type(result) is type(expected) + if isna(result): + assert isna(expected) + else: + assert result == expected + + +@pytest.mark.parametrize("func", [np.bitwise_and, np.bitwise_or, np.bitwise_xor]) +def test_numpy_ufuncs_bitwise(func): + # https://github.com/pandas-dev/pandas/issues/46769 + idx1 = Index([1, 2, 3, 4], dtype="int64") + idx2 = Index([3, 4, 5, 6], dtype="int64") + + with tm.assert_produces_warning(None): + result = func(idx1, idx2) + + expected = Index(func(idx1.values, idx2.values)) + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_old_base.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_old_base.py new file mode 100644 index 0000000000000000000000000000000000000000..1787379b0faee68956932a451a1dfbbdc711204c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_old_base.py @@ -0,0 +1,1061 @@ +from __future__ import annotations + +from datetime import datetime +import weakref + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs.tslibs import Timestamp + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + CategoricalIndex, + DatetimeIndex, + DatetimeTZDtype, + Index, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, + isna, + period_range, +) +import pandas._testing as tm +import pandas.core.algorithms as algos +from pandas.core.arrays import BaseMaskedArray + + +class TestBase: + @pytest.fixture( + params=[ + RangeIndex(start=0, stop=20, step=2), + Index(np.arange(5, dtype=np.float64)), + Index(np.arange(5, dtype=np.float32)), + Index(np.arange(5, dtype=np.uint64)), + Index(range(0, 20, 2), dtype=np.int64), + Index(range(0, 20, 2), dtype=np.int32), + Index(range(0, 20, 2), dtype=np.int16), + Index(range(0, 20, 2), dtype=np.int8), + Index(list("abcde")), + Index([0, "a", 1, "b", 2, "c"]), + period_range("20130101", periods=5, freq="D"), + TimedeltaIndex( + [ + "0 days 01:00:00", + "1 days 01:00:00", + "2 days 01:00:00", + "3 days 01:00:00", + "4 days 01:00:00", + ], + dtype="timedelta64[ns]", + freq="D", + ), + DatetimeIndex( + ["2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05"], + dtype="datetime64[ns]", + freq="D", + ), + IntervalIndex.from_breaks(range(11), closed="right"), + ] + ) + def simple_index(self, request): + return request.param + + def test_pickle_compat_construction(self, simple_index): + # need an object to create with + if isinstance(simple_index, RangeIndex): + pytest.skip("RangeIndex() is a valid constructor") + msg = "|".join( + [ + r"Index\(\.\.\.\) must be called with a collection of some " + r"kind, None was passed", + r"DatetimeIndex\(\) must be called with a collection of some " + r"kind, None was passed", + r"TimedeltaIndex\(\) must be called with a collection of some " + r"kind, None was passed", + r"__new__\(\) missing 1 required positional argument: 'data'", + r"__new__\(\) takes at least 2 arguments \(1 given\)", + ] + ) + with pytest.raises(TypeError, match=msg): + type(simple_index)() + + def test_shift(self, simple_index): + # GH8083 test the base class for shift + if isinstance(simple_index, (DatetimeIndex, TimedeltaIndex, PeriodIndex)): + pytest.skip("Tested in test_ops/test_arithmetic") + idx = simple_index + msg = ( + f"This method is only implemented for DatetimeIndex, 
PeriodIndex and " + f"TimedeltaIndex; Got type {type(idx).__name__}" + ) + with pytest.raises(NotImplementedError, match=msg): + idx.shift(1) + with pytest.raises(NotImplementedError, match=msg): + idx.shift(1, 2) + + def test_constructor_name_unhashable(self, simple_index): + # GH#29069 check that name is hashable + # See also same-named test in tests.series.test_constructors + idx = simple_index + with pytest.raises(TypeError, match="Index.name must be a hashable type"): + type(idx)(idx, name=[]) + + def test_create_index_existing_name(self, simple_index): + # GH11193, when an existing index is passed, and a new name is not + # specified, the new index should inherit the previous object name + expected = simple_index.copy() + if not isinstance(expected, MultiIndex): + expected.name = "foo" + result = Index(expected) + tm.assert_index_equal(result, expected) + + result = Index(expected, name="bar") + expected.name = "bar" + tm.assert_index_equal(result, expected) + else: + expected.names = ["foo", "bar"] + result = Index(expected) + tm.assert_index_equal( + result, + Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ), + names=["foo", "bar"], + ), + ) + + result = Index(expected, names=["A", "B"]) + tm.assert_index_equal( + result, + Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ), + names=["A", "B"], + ), + ) + + def test_numeric_compat(self, simple_index): + idx = simple_index + # Check that this doesn't cover MultiIndex case, if/when it does, + # we can remove multi.test_compat.test_numeric_compat + assert not isinstance(idx, MultiIndex) + if type(idx) is Index: + pytest.skip("Not applicable for Index") + if is_numeric_dtype(simple_index.dtype) or isinstance( + simple_index, TimedeltaIndex + ): + pytest.skip("Tested elsewhere.") + + typ = type(idx._data).__name__ + cls = type(idx).__name__ + lmsg = "|".join( + [ + rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'", + "cannot perform (__mul__|__truediv__|__floordiv__) with " + f"this index type: ({cls}|{typ})", + ] + ) + with pytest.raises(TypeError, match=lmsg): + idx * 1 + rmsg = "|".join( + [ + rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'", + "cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with " + f"this index type: ({cls}|{typ})", + ] + ) + with pytest.raises(TypeError, match=rmsg): + 1 * idx + + div_err = lmsg.replace("*", "/") + with pytest.raises(TypeError, match=div_err): + idx / 1 + div_err = rmsg.replace("*", "/") + with pytest.raises(TypeError, match=div_err): + 1 / idx + + floordiv_err = lmsg.replace("*", "//") + with pytest.raises(TypeError, match=floordiv_err): + idx // 1 + floordiv_err = rmsg.replace("*", "//") + with pytest.raises(TypeError, match=floordiv_err): + 1 // idx + + def test_logical_compat(self, simple_index): + if simple_index.dtype in (object, "string"): + pytest.skip("Tested elsewhere.") + idx = simple_index + if idx.dtype.kind in "iufcbm": + assert idx.all() == idx._values.all() + assert idx.all() == idx.to_series().all() + assert idx.any() == idx._values.any() + assert idx.any() == idx.to_series().any() + else: + msg = "cannot perform (any|all)" + if isinstance(idx, IntervalIndex): + msg = ( + r"'IntervalArray' with dtype interval\[.*\] does " + "not support reduction '(any|all)'" + ) + with pytest.raises(TypeError, match=msg): + idx.all() + with 
pytest.raises(TypeError, match=msg): + idx.any() + + def test_repr_roundtrip(self, simple_index): + if isinstance(simple_index, IntervalIndex): + pytest.skip(f"Not a valid repr for {type(simple_index).__name__}") + idx = simple_index + tm.assert_index_equal(eval(repr(idx)), idx) + + def test_repr_max_seq_item_setting(self, simple_index): + # GH10182 + if isinstance(simple_index, IntervalIndex): + pytest.skip(f"Not a valid repr for {type(simple_index).__name__}") + idx = simple_index + idx = idx.repeat(50) + with pd.option_context("display.max_seq_items", None): + repr(idx) + assert "..." not in str(idx) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_ensure_copied_data(self, index): + # Check the "copy" argument of each Index.__new__ is honoured + # GH12309 + init_kwargs = {} + if isinstance(index, PeriodIndex): + # Needs "freq" specification: + init_kwargs["freq"] = index.freq + elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)): + pytest.skip( + "RangeIndex cannot be initialized from data, " + "MultiIndex and CategoricalIndex are tested separately" + ) + elif index.dtype == object and index.inferred_type == "boolean": + init_kwargs["dtype"] = index.dtype + + index_type = type(index) + result = index_type(index.values, copy=True, **init_kwargs) + if isinstance(index.dtype, DatetimeTZDtype): + result = result.tz_localize("UTC").tz_convert(index.tz) + if isinstance(index, (DatetimeIndex, TimedeltaIndex)): + index = index._with_freq(None) + + tm.assert_index_equal(index, result) + + if isinstance(index, PeriodIndex): + # .values an object array of Period, thus copied + depr_msg = "The 'ordinal' keyword in PeriodIndex is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = index_type(ordinal=index.asi8, copy=False, **init_kwargs) + tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same="same") + elif isinstance(index, IntervalIndex): + # checked in test_interval.py + pass + elif type(index) is Index and not isinstance(index.dtype, np.dtype): + result = index_type(index.values, copy=False, **init_kwargs) + tm.assert_index_equal(result, index) + + if isinstance(index._values, BaseMaskedArray): + assert np.shares_memory(index._values._data, result._values._data) + tm.assert_numpy_array_equal( + index._values._data, result._values._data, check_same="same" + ) + assert np.shares_memory(index._values._mask, result._values._mask) + tm.assert_numpy_array_equal( + index._values._mask, result._values._mask, check_same="same" + ) + elif index.dtype == "string[python]": + assert np.shares_memory(index._values._ndarray, result._values._ndarray) + tm.assert_numpy_array_equal( + index._values._ndarray, result._values._ndarray, check_same="same" + ) + elif index.dtype in ("string[pyarrow]", "string[pyarrow_numpy]"): + assert tm.shares_memory(result._values, index._values) + else: + raise NotImplementedError(index.dtype) + else: + result = index_type(index.values, copy=False, **init_kwargs) + tm.assert_numpy_array_equal(index.values, result.values, check_same="same") + + def test_memory_usage(self, index): + index._engine.clear_mapping() + result = index.memory_usage() + if index.empty: + # we report 0 for no-length + assert result == 0 + return + + # non-zero length + index.get_loc(index[0]) + result2 = index.memory_usage() + result3 = index.memory_usage(deep=True) + + # RangeIndex, IntervalIndex + # don't have engines + # Index[EA] has engine but it does not have a Hashtable .mapping + if not 
isinstance(index, (RangeIndex, IntervalIndex)) and not (
+            type(index) is Index and not isinstance(index.dtype, np.dtype)
+        ):
+            assert result2 > result
+
+        if index.inferred_type == "object":
+            assert result3 > result2
+
+    def test_argsort(self, index):
+        if isinstance(index, CategoricalIndex):
+            pytest.skip(f"{type(self).__name__} separately tested")
+
+        result = index.argsort()
+        expected = np.array(index).argsort()
+        tm.assert_numpy_array_equal(result, expected, check_dtype=False)
+
+    def test_numpy_argsort(self, index):
+        result = np.argsort(index)
+        expected = index.argsort()
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = np.argsort(index, kind="mergesort")
+        expected = index.argsort(kind="mergesort")
+        tm.assert_numpy_array_equal(result, expected)
+
+        # these are the only two types that perform
+        # pandas compatibility input validation - the
+        # rest already perform separate (or no) such
+        # validation via their 'values' attribute as
+        # defined in pandas.core.indexes/base.py - they
+        # cannot be changed at the moment due to
+        # backwards compatibility concerns
+        if isinstance(index, (CategoricalIndex, RangeIndex)):
+            msg = "the 'axis' parameter is not supported"
+            with pytest.raises(ValueError, match=msg):
+                np.argsort(index, axis=1)
+
+            msg = "the 'order' parameter is not supported"
+            with pytest.raises(ValueError, match=msg):
+                np.argsort(index, order=("a", "b"))
+
+    def test_repeat(self, simple_index):
+        rep = 2
+        idx = simple_index.copy()
+        new_index_cls = idx._constructor
+        expected = new_index_cls(idx.values.repeat(rep), name=idx.name)
+        tm.assert_index_equal(idx.repeat(rep), expected)
+
+        idx = simple_index
+        rep = np.arange(len(idx))
+        expected = new_index_cls(idx.values.repeat(rep), name=idx.name)
+        tm.assert_index_equal(idx.repeat(rep), expected)
+
+    def test_numpy_repeat(self, simple_index):
+        rep = 2
+        idx = simple_index
+        expected = idx.repeat(rep)
+        tm.assert_index_equal(np.repeat(idx, rep), expected)
+
+        msg = "the 'axis' parameter is not supported"
+        with pytest.raises(ValueError, match=msg):
+            np.repeat(idx, rep, axis=0)
+
+    def test_where(self, listlike_box, simple_index):
+        if isinstance(simple_index, (IntervalIndex, PeriodIndex)) or is_numeric_dtype(
+            simple_index.dtype
+        ):
+            pytest.skip("Tested elsewhere.")
+        klass = listlike_box
+
+        idx = simple_index
+        if isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
+            # where does not preserve freq
+            idx = idx._with_freq(None)
+
+        cond = [True] * len(idx)
+        result = idx.where(klass(cond))
+        expected = idx
+        tm.assert_index_equal(result, expected)
+
+        cond = [False] + [True] * len(idx[1:])
+        expected = Index([idx._na_value] + idx[1:].tolist(), dtype=idx.dtype)
+        result = idx.where(klass(cond))
+        tm.assert_index_equal(result, expected)
+
+    def test_insert_base(self, index):
+        trimmed = index[1:4]
+
+        if not len(index):
+            pytest.skip("Not applicable for empty index")
+
+        # test 0th element
+        warn = None
+        if index.dtype == object and index.inferred_type == "boolean":
+            # GH#51363
+            warn = FutureWarning
+        msg = "The behavior of Index.insert with object-dtype is deprecated"
+        with tm.assert_produces_warning(warn, match=msg):
+            result = trimmed.insert(0, index[0])
+        assert index[0:4].equals(result)
+
+    @pytest.mark.skipif(
+        using_pyarrow_string_dtype(),
+        reason="completely different behavior, tested elsewhere",
+    )
+    def test_insert_out_of_bounds(self, index):
+        # TypeError/IndexError matches what np.insert raises in these cases
+
+        if len(index) > 0:
+            err = TypeError
+        else:
+            err = IndexError
+        if
len(index) == 0: + # 0 vs 0.5 in error message varies with numpy version + msg = "index (0|0.5) is out of bounds for axis 0 with size 0" + else: + msg = "slice indices must be integers or None or have an __index__ method" + with pytest.raises(err, match=msg): + index.insert(0.5, "foo") + + msg = "|".join( + [ + r"index -?\d+ is out of bounds for axis 0 with size \d+", + "loc must be an integer between", + ] + ) + with pytest.raises(IndexError, match=msg): + index.insert(len(index) + 1, 1) + + with pytest.raises(IndexError, match=msg): + index.insert(-len(index) - 1, 1) + + def test_delete_base(self, index): + if not len(index): + pytest.skip("Not applicable for empty index") + + if isinstance(index, RangeIndex): + # tested in class + pytest.skip(f"{type(self).__name__} tested elsewhere") + + expected = index[1:] + result = index.delete(0) + assert result.equals(expected) + assert result.name == expected.name + + expected = index[:-1] + result = index.delete(-1) + assert result.equals(expected) + assert result.name == expected.name + + length = len(index) + msg = f"index {length} is out of bounds for axis 0 with size {length}" + with pytest.raises(IndexError, match=msg): + index.delete(length) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_equals(self, index): + if isinstance(index, IntervalIndex): + pytest.skip(f"{type(index).__name__} tested elsewhere") + + is_ea_idx = type(index) is Index and not isinstance(index.dtype, np.dtype) + + assert index.equals(index) + assert index.equals(index.copy()) + if not is_ea_idx: + # doesn't hold for e.g. IntegerDtype + assert index.equals(index.astype(object)) + + assert not index.equals(list(index)) + assert not index.equals(np.array(index)) + + # Cannot pass in non-int64 dtype to RangeIndex + if not isinstance(index, RangeIndex) and not is_ea_idx: + same_values = Index(index, dtype=object) + assert index.equals(same_values) + assert same_values.equals(index) + + if index.nlevels == 1: + # do not test MultiIndex + assert not index.equals(Series(index)) + + def test_equals_op(self, simple_index): + # GH9947, GH10637 + index_a = simple_index + + n = len(index_a) + index_b = index_a[0:-1] + index_c = index_a[0:-1].append(index_a[-2:-1]) + index_d = index_a[0:1] + + msg = "Lengths must match|could not be broadcast" + with pytest.raises(ValueError, match=msg): + index_a == index_b + expected1 = np.array([True] * n) + expected2 = np.array([True] * (n - 1) + [False]) + tm.assert_numpy_array_equal(index_a == index_a, expected1) + tm.assert_numpy_array_equal(index_a == index_c, expected2) + + # test comparisons with numpy arrays + array_a = np.array(index_a) + array_b = np.array(index_a[0:-1]) + array_c = np.array(index_a[0:-1].append(index_a[-2:-1])) + array_d = np.array(index_a[0:1]) + with pytest.raises(ValueError, match=msg): + index_a == array_b + tm.assert_numpy_array_equal(index_a == array_a, expected1) + tm.assert_numpy_array_equal(index_a == array_c, expected2) + + # test comparisons with Series + series_a = Series(array_a) + series_b = Series(array_b) + series_c = Series(array_c) + series_d = Series(array_d) + with pytest.raises(ValueError, match=msg): + index_a == series_b + + tm.assert_numpy_array_equal(index_a == series_a, expected1) + tm.assert_numpy_array_equal(index_a == series_c, expected2) + + # cases where length is 1 for one of them + with pytest.raises(ValueError, match="Lengths must match"): + index_a == index_d + with pytest.raises(ValueError, match="Lengths must match"): + index_a == 
series_d + with pytest.raises(ValueError, match="Lengths must match"): + index_a == array_d + msg = "Can only compare identically-labeled Series objects" + with pytest.raises(ValueError, match=msg): + series_a == series_d + with pytest.raises(ValueError, match="Lengths must match"): + series_a == array_d + + # comparing with a scalar should broadcast; note that we are excluding + # MultiIndex because in this case each item in the index is a tuple of + # length 2, and therefore is considered an array of length 2 in the + # comparison instead of a scalar + if not isinstance(index_a, MultiIndex): + expected3 = np.array([False] * (len(index_a) - 2) + [True, False]) + # assuming the 2nd to last item is unique in the data + item = index_a[-2] + tm.assert_numpy_array_equal(index_a == item, expected3) + tm.assert_series_equal(series_a == item, Series(expected3)) + + def test_format(self, simple_index): + # GH35439 + if is_numeric_dtype(simple_index.dtype) or isinstance( + simple_index, DatetimeIndex + ): + pytest.skip("Tested elsewhere.") + idx = simple_index + expected = [str(x) for x in idx] + msg = r"Index\.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert idx.format() == expected + + def test_format_empty(self, simple_index): + # GH35712 + if isinstance(simple_index, (PeriodIndex, RangeIndex)): + pytest.skip("Tested elsewhere") + empty_idx = type(simple_index)([]) + msg = r"Index\.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert empty_idx.format() == [] + with tm.assert_produces_warning(FutureWarning, match=msg): + assert empty_idx.format(name=True) == [""] + + def test_fillna(self, index): + # GH 11343 + if len(index) == 0: + pytest.skip("Not relevant for empty index") + elif index.dtype == bool: + pytest.skip(f"{index.dtype} cannot hold NAs") + elif isinstance(index, Index) and is_integer_dtype(index.dtype): + pytest.skip(f"Not relevant for Index with {index.dtype}") + elif isinstance(index, MultiIndex): + idx = index.copy(deep=True) + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.fillna(idx[0]) + else: + idx = index.copy(deep=True) + result = idx.fillna(idx[0]) + tm.assert_index_equal(result, idx) + assert result is not idx + + msg = "'value' must be a scalar, passed: " + with pytest.raises(TypeError, match=msg): + idx.fillna([idx[0]]) + + idx = index.copy(deep=True) + values = idx._values + + values[1] = np.nan + + idx = type(index)(values) + + msg = "does not support 'downcast'" + msg2 = r"The 'downcast' keyword in .*Index\.fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg2): + with pytest.raises(NotImplementedError, match=msg): + # For now at least, we only raise if there are NAs present + idx.fillna(idx[0], downcast="infer") + + expected = np.array([False] * len(idx), dtype=bool) + expected[1] = True + tm.assert_numpy_array_equal(idx._isnan, expected) + assert idx.hasnans is True + + def test_nulls(self, index): + # this is really a smoke test for the methods + # as these are adequately tested for function elsewhere + if len(index) == 0: + tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool)) + elif isinstance(index, MultiIndex): + idx = index.copy() + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.isna() + elif not index.hasnans: + tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), dtype=bool)) + 
tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool)) + else: + result = isna(index) + tm.assert_numpy_array_equal(index.isna(), result) + tm.assert_numpy_array_equal(index.notna(), ~result) + + def test_empty(self, simple_index): + # GH 15270 + idx = simple_index + assert not idx.empty + assert idx[:0].empty + + def test_join_self_unique(self, join_type, simple_index): + idx = simple_index + if idx.is_unique: + joined = idx.join(idx, how=join_type) + expected = simple_index + if join_type == "outer": + expected = algos.safe_sort(expected) + tm.assert_index_equal(joined, expected) + + def test_map(self, simple_index): + # callable + if isinstance(simple_index, (TimedeltaIndex, PeriodIndex)): + pytest.skip("Tested elsewhere.") + idx = simple_index + + result = idx.map(lambda x: x) + # RangeIndex are equivalent to the similar Index with int64 dtype + tm.assert_index_equal(result, idx, exact="equiv") + + @pytest.mark.parametrize( + "mapper", + [ + lambda values, index: {i: e for e, i in zip(values, index)}, + lambda values, index: Series(values, index), + ], + ) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_map_dictlike(self, mapper, simple_index, request): + idx = simple_index + if isinstance(idx, (DatetimeIndex, TimedeltaIndex, PeriodIndex)): + pytest.skip("Tested elsewhere.") + + identity = mapper(idx.values, idx) + + result = idx.map(identity) + # RangeIndex are equivalent to the similar Index with int64 dtype + tm.assert_index_equal(result, idx, exact="equiv") + + # empty mappable + dtype = None + if idx.dtype.kind == "f": + dtype = idx.dtype + + expected = Index([np.nan] * len(idx), dtype=dtype) + result = idx.map(mapper(expected, idx)) + tm.assert_index_equal(result, expected) + + def test_map_str(self, simple_index): + # GH 31202 + if isinstance(simple_index, CategoricalIndex): + pytest.skip("See test_map.py") + idx = simple_index + result = idx.map(str) + expected = Index([str(x) for x in idx]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("copy", [True, False]) + @pytest.mark.parametrize("name", [None, "foo"]) + @pytest.mark.parametrize("ordered", [True, False]) + def test_astype_category(self, copy, name, ordered, simple_index): + # GH 18630 + idx = simple_index + if name: + idx = idx.rename(name) + + # standard categories + dtype = CategoricalDtype(ordered=ordered) + result = idx.astype(dtype, copy=copy) + expected = CategoricalIndex(idx, name=name, ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # non-standard categories + dtype = CategoricalDtype(idx.unique().tolist()[:-1], ordered) + result = idx.astype(dtype, copy=copy) + expected = CategoricalIndex(idx, name=name, dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + if ordered is False: + # dtype='category' defaults to ordered=False, so only test once + result = idx.astype("category", copy=copy) + expected = CategoricalIndex(idx, name=name) + tm.assert_index_equal(result, expected, exact=True) + + def test_is_unique(self, simple_index): + # initialize a unique index + index = simple_index.drop_duplicates() + assert index.is_unique is True + + # empty index should be unique + index_empty = index[:0] + assert index_empty.is_unique is True + + # test basic dupes + index_dup = index.insert(0, index[0]) + assert index_dup.is_unique is False + + # single NA should be unique + index_na = index.insert(0, np.nan) + assert index_na.is_unique is True + + # multiple NA should not be unique + 
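+        # (Index.is_unique treats repeated NA values as duplicates of one another)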
index_na_dup = index_na.insert(0, np.nan) + assert index_na_dup.is_unique is False + + @pytest.mark.arm_slow + def test_engine_reference_cycle(self, simple_index): + # GH27585 + index = simple_index.copy() + ref = weakref.ref(index) + index._engine + del index + assert ref() is None + + def test_getitem_2d_deprecated(self, simple_index): + # GH#30588, GH#31479 + if isinstance(simple_index, IntervalIndex): + pytest.skip("Tested elsewhere") + idx = simple_index + msg = "Multi-dimensional indexing|too many|only" + with pytest.raises((ValueError, IndexError), match=msg): + idx[:, None] + + if not isinstance(idx, RangeIndex): + # GH#44051 RangeIndex already raised pre-2.0 with a different message + with pytest.raises((ValueError, IndexError), match=msg): + idx[True] + with pytest.raises((ValueError, IndexError), match=msg): + idx[False] + else: + msg = "only integers, slices" + with pytest.raises(IndexError, match=msg): + idx[True] + with pytest.raises(IndexError, match=msg): + idx[False] + + def test_copy_shares_cache(self, simple_index): + # GH32898, GH36840 + idx = simple_index + idx.get_loc(idx[0]) # populates the _cache. + copy = idx.copy() + + assert copy._cache is idx._cache + + def test_shallow_copy_shares_cache(self, simple_index): + # GH32669, GH36840 + idx = simple_index + idx.get_loc(idx[0]) # populates the _cache. + shallow_copy = idx._view() + + assert shallow_copy._cache is idx._cache + + shallow_copy = idx._shallow_copy(idx._data) + assert shallow_copy._cache is not idx._cache + assert shallow_copy._cache == {} + + def test_index_groupby(self, simple_index): + idx = simple_index[:5] + to_groupby = np.array([1, 2, np.nan, 2, 1]) + tm.assert_dict_equal( + idx.groupby(to_groupby), {1.0: idx[[0, 4]], 2.0: idx[[1, 3]]} + ) + + to_groupby = DatetimeIndex( + [ + datetime(2011, 11, 1), + datetime(2011, 12, 1), + pd.NaT, + datetime(2011, 12, 1), + datetime(2011, 11, 1), + ], + tz="UTC", + ).values + + ex_keys = [Timestamp("2011-11-01"), Timestamp("2011-12-01")] + expected = {ex_keys[0]: idx[[0, 4]], ex_keys[1]: idx[[1, 3]]} + tm.assert_dict_equal(idx.groupby(to_groupby), expected) + + def test_append_preserves_dtype(self, simple_index): + # In particular Index with dtype float32 + index = simple_index + N = len(index) + + result = index.append(index) + assert result.dtype == index.dtype + tm.assert_index_equal(result[:N], index, check_exact=True) + tm.assert_index_equal(result[N:], index, check_exact=True) + + alt = index.take(list(range(N)) * 2) + tm.assert_index_equal(result, alt, check_exact=True) + + def test_inv(self, simple_index, using_infer_string): + idx = simple_index + + if idx.dtype.kind in ["i", "u"]: + res = ~idx + expected = Index(~idx.values, name=idx.name) + tm.assert_index_equal(res, expected) + + # check that we are matching Series behavior + res2 = ~Series(idx) + tm.assert_series_equal(res2, Series(expected)) + else: + if idx.dtype.kind == "f": + err = TypeError + msg = "ufunc 'invert' not supported for the input types" + elif using_infer_string and idx.dtype == "string": + import pyarrow as pa + + err = pa.lib.ArrowNotImplementedError + msg = "has no kernel" + else: + err = TypeError + msg = "bad operand" + with pytest.raises(err, match=msg): + ~idx + + # check that we get the same behavior with Series + with pytest.raises(err, match=msg): + ~Series(idx) + + def test_is_boolean_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_boolean() + + def test_is_floating_is_deprecated(self, 
simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_floating() + + def test_is_integer_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_integer() + + def test_holds_integer_deprecated(self, simple_index): + # GH50243 + idx = simple_index + msg = f"{type(idx).__name__}.holds_integer is deprecated. " + with tm.assert_produces_warning(FutureWarning, match=msg): + idx.holds_integer() + + def test_is_numeric_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning( + FutureWarning, + match=f"{type(idx).__name__}.is_numeric is deprecated. ", + ): + idx.is_numeric() + + def test_is_categorical_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning( + FutureWarning, + match=r"Use pandas\.api\.types\.is_categorical_dtype instead", + ): + idx.is_categorical() + + def test_is_interval_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_interval() + + def test_is_object_is_deprecated(self, simple_index): + # GH50042 + idx = simple_index + with tm.assert_produces_warning(FutureWarning): + idx.is_object() + + +class TestNumericBase: + @pytest.fixture( + params=[ + RangeIndex(start=0, stop=20, step=2), + Index(np.arange(5, dtype=np.float64)), + Index(np.arange(5, dtype=np.float32)), + Index(np.arange(5, dtype=np.uint64)), + Index(range(0, 20, 2), dtype=np.int64), + Index(range(0, 20, 2), dtype=np.int32), + Index(range(0, 20, 2), dtype=np.int16), + Index(range(0, 20, 2), dtype=np.int8), + ] + ) + def simple_index(self, request): + return request.param + + def test_constructor_unwraps_index(self, simple_index): + if isinstance(simple_index, RangeIndex): + pytest.skip("Tested elsewhere.") + index_cls = type(simple_index) + dtype = simple_index.dtype + + idx = Index([1, 2], dtype=dtype) + result = index_cls(idx) + expected = np.array([1, 2], dtype=idx.dtype) + tm.assert_numpy_array_equal(result._data, expected) + + def test_can_hold_identifiers(self, simple_index): + idx = simple_index + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is False + + def test_view(self, simple_index): + if isinstance(simple_index, RangeIndex): + pytest.skip("Tested elsewhere.") + index_cls = type(simple_index) + dtype = simple_index.dtype + + idx = index_cls([], dtype=dtype, name="Foo") + idx_view = idx.view() + assert idx_view.name == "Foo" + + idx_view = idx.view(dtype) + tm.assert_index_equal(idx, index_cls(idx_view, name="Foo"), exact=True) + + msg = "Passing a type in .*Index.view is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + idx_view = idx.view(index_cls) + tm.assert_index_equal(idx, index_cls(idx_view, name="Foo"), exact=True) + + def test_format(self, simple_index): + # GH35439 + if isinstance(simple_index, DatetimeIndex): + pytest.skip("Tested elsewhere") + idx = simple_index + max_width = max(len(str(x)) for x in idx) + expected = [str(x).ljust(max_width) for x in idx] + msg = r"Index\.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert idx.format() == expected + + def test_insert_non_na(self, simple_index): + # GH#43921 inserting an element that we know we can hold should + # not change dtype or type (except for RangeIndex) + index = simple_index + + result = index.insert(0, index[0]) + + expected = Index([index[0]] + list(index), 
dtype=index.dtype) + tm.assert_index_equal(result, expected, exact=True) + + def test_insert_na(self, nulls_fixture, simple_index): + # GH 18295 (test missing) + index = simple_index + na_val = nulls_fixture + + if na_val is pd.NaT: + expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object) + else: + expected = Index([index[0], np.nan] + list(index[1:])) + # GH#43921 we preserve float dtype + if index.dtype.kind == "f": + expected = Index(expected, dtype=index.dtype) + + result = index.insert(1, na_val) + tm.assert_index_equal(result, expected, exact=True) + + def test_arithmetic_explicit_conversions(self, simple_index): + # GH 8608 + # add/sub are overridden explicitly for Float/Int Index + index_cls = type(simple_index) + if index_cls is RangeIndex: + idx = RangeIndex(5) + else: + idx = index_cls(np.arange(5, dtype="int64")) + + # float conversions + arr = np.arange(5, dtype="int64") * 3.2 + expected = Index(arr, dtype=np.float64) + fidx = idx * 3.2 + tm.assert_index_equal(fidx, expected) + fidx = 3.2 * idx + tm.assert_index_equal(fidx, expected) + + # interops with numpy arrays + expected = Index(arr, dtype=np.float64) + a = np.zeros(5, dtype="float64") + result = fidx - a + tm.assert_index_equal(result, expected) + + expected = Index(-arr, dtype=np.float64) + a = np.zeros(5, dtype="float64") + result = a - fidx + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("complex_dtype", [np.complex64, np.complex128]) + def test_astype_to_complex(self, complex_dtype, simple_index): + result = simple_index.astype(complex_dtype) + + assert type(result) is Index and result.dtype == complex_dtype + + def test_cast_string(self, simple_index): + if isinstance(simple_index, RangeIndex): + pytest.skip("casting of strings not relevant for RangeIndex") + result = type(simple_index)(["0", "1", "2"], dtype=simple_index.dtype) + expected = type(simple_index)([0, 1, 2], dtype=simple_index.dtype) + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_setops.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_setops.py new file mode 100644 index 0000000000000000000000000000000000000000..4a6982cf98670b43cfb87f5f3ea5a4edd5a7fd36 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_setops.py @@ -0,0 +1,959 @@ +""" +The tests in this package are to ensure the proper resultant dtypes of +set operations. +""" +from datetime import datetime +import operator + +import numpy as np +import pytest + +from pandas._libs import lib + +from pandas.core.dtypes.cast import find_common_type + +from pandas import ( + CategoricalDtype, + CategoricalIndex, + DatetimeTZDtype, + Index, + MultiIndex, + PeriodDtype, + RangeIndex, + Series, + Timestamp, +) +import pandas._testing as tm +from pandas.api.types import ( + is_signed_integer_dtype, + pandas_dtype, +) + + +def equal_contents(arr1, arr2) -> bool: + """ + Checks if the set of unique elements of arr1 and arr2 are equivalent. + """ + return frozenset(arr1) == frozenset(arr2) + + +@pytest.fixture( + params=tm.ALL_REAL_NUMPY_DTYPES + + [ + "object", + "category", + "datetime64[ns]", + "timedelta64[ns]", + ] +) +def any_dtype_for_small_pos_integer_indexes(request): + """ + Dtypes that can be given to an Index with small positive integers. + + This means that for any dtype `x` in the params list, `Index([1, 2, 3], dtype=x)` is + valid and gives the correct Index (sub-)class. 
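+
+    For example, ``Index([1, 2, 3], dtype="category")`` returns a
+    CategoricalIndex, and ``Index([1, 2, 3], dtype="timedelta64[ns]")`` a
+    TimedeltaIndex.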
+ """ + return request.param + + +def test_union_same_types(index): + # Union with a non-unique, non-monotonic index raises error + # Only needed for bool index factory + idx1 = index.sort_values() + idx2 = index.sort_values() + assert idx1.union(idx2).dtype == idx1.dtype + + +def test_union_different_types(index_flat, index_flat2, request): + # This test only considers combinations of indices + # GH 23525 + idx1 = index_flat + idx2 = index_flat2 + + if ( + not idx1.is_unique + and not idx2.is_unique + and idx1.dtype.kind == "i" + and idx2.dtype.kind == "b" + ) or ( + not idx2.is_unique + and not idx1.is_unique + and idx2.dtype.kind == "i" + and idx1.dtype.kind == "b" + ): + # Each condition had idx[1|2].is_monotonic_decreasing + # but failed when e.g. + # idx1 = Index( + # [True, True, True, True, True, True, True, True, False, False], dtype='bool' + # ) + # idx2 = Index([0, 0, 1, 1, 2, 2], dtype='int64') + mark = pytest.mark.xfail( + reason="GH#44000 True==1", raises=ValueError, strict=False + ) + request.applymarker(mark) + + common_dtype = find_common_type([idx1.dtype, idx2.dtype]) + + warn = None + msg = "'<' not supported between" + if not len(idx1) or not len(idx2): + pass + elif (idx1.dtype.kind == "c" and (not lib.is_np_dtype(idx2.dtype, "iufc"))) or ( + idx2.dtype.kind == "c" and (not lib.is_np_dtype(idx1.dtype, "iufc")) + ): + # complex objects non-sortable + warn = RuntimeWarning + elif ( + isinstance(idx1.dtype, PeriodDtype) and isinstance(idx2.dtype, CategoricalDtype) + ) or ( + isinstance(idx2.dtype, PeriodDtype) and isinstance(idx1.dtype, CategoricalDtype) + ): + warn = FutureWarning + msg = r"PeriodDtype\[B\] is deprecated" + mark = pytest.mark.xfail( + reason="Warning not produced on all builds", + raises=AssertionError, + strict=False, + ) + request.applymarker(mark) + + any_uint64 = np.uint64 in (idx1.dtype, idx2.dtype) + idx1_signed = is_signed_integer_dtype(idx1.dtype) + idx2_signed = is_signed_integer_dtype(idx2.dtype) + + # Union with a non-unique, non-monotonic index raises error + # This applies to the boolean index + idx1 = idx1.sort_values() + idx2 = idx2.sort_values() + + with tm.assert_produces_warning(warn, match=msg): + res1 = idx1.union(idx2) + res2 = idx2.union(idx1) + + if any_uint64 and (idx1_signed or idx2_signed): + assert res1.dtype == np.dtype("O") + assert res2.dtype == np.dtype("O") + else: + assert res1.dtype == common_dtype + assert res2.dtype == common_dtype + + +@pytest.mark.parametrize( + "idx1,idx2", + [ + (Index(np.arange(5), dtype=np.int64), RangeIndex(5)), + (Index(np.arange(5), dtype=np.float64), Index(np.arange(5), dtype=np.int64)), + (Index(np.arange(5), dtype=np.float64), RangeIndex(5)), + (Index(np.arange(5), dtype=np.float64), Index(np.arange(5), dtype=np.uint64)), + ], +) +def test_compatible_inconsistent_pairs(idx1, idx2): + # GH 23525 + res1 = idx1.union(idx2) + res2 = idx2.union(idx1) + + assert res1.dtype in (idx1.dtype, idx2.dtype) + assert res2.dtype in (idx1.dtype, idx2.dtype) + + +@pytest.mark.parametrize( + "left, right, expected", + [ + ("int64", "int64", "int64"), + ("int64", "uint64", "object"), + ("int64", "float64", "float64"), + ("uint64", "float64", "float64"), + ("uint64", "uint64", "uint64"), + ("float64", "float64", "float64"), + ("datetime64[ns]", "int64", "object"), + ("datetime64[ns]", "uint64", "object"), + ("datetime64[ns]", "float64", "object"), + ("datetime64[ns, CET]", "int64", "object"), + ("datetime64[ns, CET]", "uint64", "object"), + ("datetime64[ns, CET]", "float64", "object"), + ("Period[D]", "int64", 
"object"), + ("Period[D]", "uint64", "object"), + ("Period[D]", "float64", "object"), + ], +) +@pytest.mark.parametrize("names", [("foo", "foo", "foo"), ("foo", "bar", None)]) +def test_union_dtypes(left, right, expected, names): + left = pandas_dtype(left) + right = pandas_dtype(right) + a = Index([], dtype=left, name=names[0]) + b = Index([], dtype=right, name=names[1]) + result = a.union(b) + assert result.dtype == expected + assert result.name == names[2] + + # Testing name retention + # TODO: pin down desired dtype; do we want it to be commutative? + result = a.intersection(b) + assert result.name == names[2] + + +@pytest.mark.parametrize("values", [[1, 2, 2, 3], [3, 3]]) +def test_intersection_duplicates(values): + # GH#31326 + a = Index(values) + b = Index([3, 3]) + result = a.intersection(b) + expected = Index([3]) + tm.assert_index_equal(result, expected) + + +class TestSetOps: + # Set operation tests shared by all indexes in the `index` fixture + @pytest.mark.parametrize("case", [0.5, "xxx"]) + @pytest.mark.parametrize( + "method", ["intersection", "union", "difference", "symmetric_difference"] + ) + def test_set_ops_error_cases(self, case, method, index): + # non-iterable input + msg = "Input must be Index or array-like" + with pytest.raises(TypeError, match=msg): + getattr(index, method)(case) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_intersection_base(self, index): + if isinstance(index, CategoricalIndex): + pytest.skip(f"Not relevant for {type(index).__name__}") + + first = index[:5].unique() + second = index[:3].unique() + intersect = first.intersection(second) + tm.assert_index_equal(intersect, second) + + if isinstance(index.dtype, DatetimeTZDtype): + # The second.values below will drop tz, so the rest of this test + # is not applicable. + return + + # GH#10149 + cases = [second.to_numpy(), second.to_series(), second.to_list()] + for case in cases: + result = first.intersection(case) + assert equal_contents(result, second) + + if isinstance(index, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.intersection([1, 2, 3]) + + @pytest.mark.filterwarnings( + "ignore:Falling back on a non-pyarrow:pandas.errors.PerformanceWarning" + ) + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_union_base(self, index): + index = index.unique() + first = index[3:] + second = index[:5] + everything = index + + union = first.union(second) + tm.assert_index_equal(union.sort_values(), everything.sort_values()) + + if isinstance(index.dtype, DatetimeTZDtype): + # The second.values below will drop tz, so the rest of this test + # is not applicable. + return + + # GH#10149 + cases = [second.to_numpy(), second.to_series(), second.to_list()] + for case in cases: + result = first.union(case) + assert equal_contents(result, everything) + + if isinstance(index, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.union([1, 2, 3]) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + @pytest.mark.filterwarnings( + "ignore:Falling back on a non-pyarrow:pandas.errors.PerformanceWarning" + ) + def test_difference_base(self, sort, index): + first = index[2:] + second = index[:4] + if index.inferred_type == "boolean": + # i think (TODO: be sure) there assumptions baked in about + # the index fixture that don't hold here? 
+ answer = set(first).difference(set(second)) + elif isinstance(index, CategoricalIndex): + answer = [] + else: + answer = index[4:] + result = first.difference(second, sort) + assert equal_contents(result, answer) + + # GH#10149 + cases = [second.to_numpy(), second.to_series(), second.to_list()] + for case in cases: + result = first.difference(case, sort) + assert equal_contents(result, answer) + + if isinstance(index, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.difference([1, 2, 3], sort) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + @pytest.mark.filterwarnings( + "ignore:Falling back on a non-pyarrow:pandas.errors.PerformanceWarning" + ) + def test_symmetric_difference(self, index): + if isinstance(index, CategoricalIndex): + pytest.skip(f"Not relevant for {type(index).__name__}") + if len(index) < 2: + pytest.skip("Too few values for test") + if index[0] in index[1:] or index[-1] in index[:-1]: + # index fixture has e.g. an index of bools that does not satisfy this, + # another with [0, 0, 1, 1, 2, 2] + pytest.skip("Index values do not satisfy test condition.") + + first = index[1:] + second = index[:-1] + answer = index[[0, -1]] + result = first.symmetric_difference(second) + tm.assert_index_equal(result.sort_values(), answer.sort_values()) + + # GH#10149 + cases = [second.to_numpy(), second.to_series(), second.to_list()] + for case in cases: + result = first.symmetric_difference(case) + assert equal_contents(result, answer) + + if isinstance(index, MultiIndex): + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.symmetric_difference([1, 2, 3]) + + @pytest.mark.parametrize( + "fname, sname, expected_name", + [ + ("A", "A", "A"), + ("A", "B", None), + ("A", None, None), + (None, "B", None), + (None, None, None), + ], + ) + def test_corner_union(self, index_flat, fname, sname, expected_name): + # GH#9943, GH#9862 + # Test unions with various name combinations + # Do not test MultiIndex or repeats + if not index_flat.is_unique: + index = index_flat.unique() + else: + index = index_flat + + # Test copy.union(copy) + first = index.copy().set_names(fname) + second = index.copy().set_names(sname) + union = first.union(second) + expected = index.copy().set_names(expected_name) + tm.assert_index_equal(union, expected) + + # Test copy.union(empty) + first = index.copy().set_names(fname) + second = index.drop(index).set_names(sname) + union = first.union(second) + expected = index.copy().set_names(expected_name) + tm.assert_index_equal(union, expected) + + # Test empty.union(copy) + first = index.drop(index).set_names(fname) + second = index.copy().set_names(sname) + union = first.union(second) + expected = index.copy().set_names(expected_name) + tm.assert_index_equal(union, expected) + + # Test empty.union(empty) + first = index.drop(index).set_names(fname) + second = index.drop(index).set_names(sname) + union = first.union(second) + expected = index.drop(index).set_names(expected_name) + tm.assert_index_equal(union, expected) + + @pytest.mark.parametrize( + "fname, sname, expected_name", + [ + ("A", "A", "A"), + ("A", "B", None), + ("A", None, None), + (None, "B", None), + (None, None, None), + ], + ) + def test_union_unequal(self, index_flat, fname, sname, expected_name): + if not index_flat.is_unique: + index = index_flat.unique() + else: + index = index_flat + + # test copy.union(subset) - need sort for unicode and
string + first = index.copy().set_names(fname) + second = index[1:].set_names(sname) + union = first.union(second).sort_values() + expected = index.set_names(expected_name).sort_values() + tm.assert_index_equal(union, expected) + + @pytest.mark.parametrize( + "fname, sname, expected_name", + [ + ("A", "A", "A"), + ("A", "B", None), + ("A", None, None), + (None, "B", None), + (None, None, None), + ], + ) + def test_corner_intersect(self, index_flat, fname, sname, expected_name): + # GH#35847 + # Test intersections with various name combinations + if not index_flat.is_unique: + index = index_flat.unique() + else: + index = index_flat + + # Test copy.intersection(copy) + first = index.copy().set_names(fname) + second = index.copy().set_names(sname) + intersect = first.intersection(second) + expected = index.copy().set_names(expected_name) + tm.assert_index_equal(intersect, expected) + + # Test copy.intersection(empty) + first = index.copy().set_names(fname) + second = index.drop(index).set_names(sname) + intersect = first.intersection(second) + expected = index.drop(index).set_names(expected_name) + tm.assert_index_equal(intersect, expected) + + # Test empty.intersection(copy) + first = index.drop(index).set_names(fname) + second = index.copy().set_names(sname) + intersect = first.intersection(second) + expected = index.drop(index).set_names(expected_name) + tm.assert_index_equal(intersect, expected) + + # Test empty.intersection(empty) + first = index.drop(index).set_names(fname) + second = index.drop(index).set_names(sname) + intersect = first.intersection(second) + expected = index.drop(index).set_names(expected_name) + tm.assert_index_equal(intersect, expected) + + @pytest.mark.parametrize( + "fname, sname, expected_name", + [ + ("A", "A", "A"), + ("A", "B", None), + ("A", None, None), + (None, "B", None), + (None, None, None), + ], + ) + def test_intersect_unequal(self, index_flat, fname, sname, expected_name): + if not index_flat.is_unique: + index = index_flat.unique() + else: + index = index_flat + + # test copy.intersection(subset) - need sort for unicode and string + first = index.copy().set_names(fname) + second = index[1:].set_names(sname) + intersect = first.intersection(second).sort_values() + expected = index[1:].set_names(expected_name).sort_values() + tm.assert_index_equal(intersect, expected) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_intersection_name_retention_with_nameless(self, index): + if isinstance(index, MultiIndex): + index = index.rename(list(range(index.nlevels))) + else: + index = index.rename("foo") + + other = np.asarray(index) + + result = index.intersection(other) + assert result.name == index.name + + # empty other, same dtype + result = index.intersection(other[:0]) + assert result.name == index.name + + # empty `self` + result = index[:0].intersection(other) + assert result.name == index.name + + def test_difference_preserves_type_empty(self, index, sort): + # GH#20040 + # If taking difference of a set and itself, it + # needs to preserve the type of the index + if not index.is_unique: + pytest.skip("Not relevant since index is not unique") + result = index.difference(index, sort=sort) + expected = index[:0] + tm.assert_index_equal(result, expected, exact=True) + + def test_difference_name_retention_equals(self, index, names): + if isinstance(index, MultiIndex): + names = [[x] * index.nlevels for x in names] + index = index.rename(names[0]) + other = index.rename(names[1]) + + assert 
index.equals(other) + + result = index.difference(other) + expected = index[:0].rename(names[2]) + tm.assert_index_equal(result, expected) + + def test_intersection_difference_match_empty(self, index, sort): + # GH#20040 + # Test that the intersection of an index with an + # empty index produces the same index as the difference + # of an index with itself. Test for all types + if not index.is_unique: + pytest.skip("Not relevant because index is not unique") + inter = index.intersection(index[:0]) + diff = index.difference(index, sort=sort) + tm.assert_index_equal(inter, diff, exact=True) + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +@pytest.mark.filterwarnings( + "ignore:Falling back on a non-pyarrow:pandas.errors.PerformanceWarning" +) +@pytest.mark.parametrize( + "method", ["intersection", "union", "difference", "symmetric_difference"] +) +def test_setop_with_categorical(index_flat, sort, method): + # MultiIndex tested separately in tests.indexes.multi.test_setops + index = index_flat + + other = index.astype("category") + exact = "equiv" if isinstance(index, RangeIndex) else True + + result = getattr(index, method)(other, sort=sort) + expected = getattr(index, method)(index, sort=sort) + tm.assert_index_equal(result, expected, exact=exact) + + result = getattr(index, method)(other[:5], sort=sort) + expected = getattr(index, method)(index[:5], sort=sort) + tm.assert_index_equal(result, expected, exact=exact) + + +def test_intersection_duplicates_all_indexes(index): + # GH#38743 + if index.empty: + # No duplicates in empty indexes + pytest.skip("Not relevant for empty Index") + + idx = index + idx_non_unique = idx[[0, 0, 1, 2]] + + assert idx.intersection(idx_non_unique).equals(idx_non_unique.intersection(idx)) + assert idx.intersection(idx_non_unique).is_unique + + +def test_union_duplicate_index_subsets_of_each_other( + any_dtype_for_small_pos_integer_indexes, +): + # GH#31326 + dtype = any_dtype_for_small_pos_integer_indexes + a = Index([1, 2, 2, 3], dtype=dtype) + b = Index([3, 3, 4], dtype=dtype) + + expected = Index([1, 2, 2, 3, 3, 4], dtype=dtype) + if isinstance(a, CategoricalIndex): + expected = Index([1, 2, 2, 3, 3, 4]) + result = a.union(b) + tm.assert_index_equal(result, expected) + result = a.union(b, sort=False) + tm.assert_index_equal(result, expected) + + +def test_union_with_duplicate_index_and_non_monotonic( + any_dtype_for_small_pos_integer_indexes, +): + # GH#36289 + dtype = any_dtype_for_small_pos_integer_indexes + a = Index([1, 0, 0], dtype=dtype) + b = Index([0, 1], dtype=dtype) + expected = Index([0, 0, 1], dtype=dtype) + + result = a.union(b) + tm.assert_index_equal(result, expected) + + result = b.union(a) + tm.assert_index_equal(result, expected) + + +def test_union_duplicate_index_different_dtypes(): + # GH#36289 + a = Index([1, 2, 2, 3]) + b = Index(["1", "0", "0"]) + expected = Index([1, 2, 2, 3, "1", "0", "0"]) + result = a.union(b, sort=False) + tm.assert_index_equal(result, expected) + + +def test_union_same_value_duplicated_in_both(): + # GH#36289 + a = Index([0, 0, 1]) + b = Index([0, 0, 1, 2]) + result = a.union(b) + expected = Index([0, 0, 1, 2]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dup", [1, np.nan]) +def test_union_nan_in_both(dup): + # GH#36289 + a = Index([np.nan, 1, 2, 2]) + b = Index([np.nan, dup, 1, 2]) + result = a.union(b, sort=False) + expected = Index([np.nan, dup, 1.0, 2.0, 2.0]) + tm.assert_index_equal(result, expected) + + +def 
test_union_rangeindex_sort_true(): + # GH 53490 + idx1 = RangeIndex(1, 100, 6) + idx2 = RangeIndex(1, 50, 3) + result = idx1.union(idx2, sort=True) + expected = Index( + [ + 1, + 4, + 7, + 10, + 13, + 16, + 19, + 22, + 25, + 28, + 31, + 34, + 37, + 40, + 43, + 46, + 49, + 55, + 61, + 67, + 73, + 79, + 85, + 91, + 97, + ] + ) + tm.assert_index_equal(result, expected) + + +def test_union_with_duplicate_index_not_subset_and_non_monotonic( + any_dtype_for_small_pos_integer_indexes, +): + # GH#36289 + dtype = any_dtype_for_small_pos_integer_indexes + a = Index([1, 0, 2], dtype=dtype) + b = Index([0, 0, 1], dtype=dtype) + expected = Index([0, 0, 1, 2], dtype=dtype) + if isinstance(a, CategoricalIndex): + expected = Index([0, 0, 1, 2]) + + result = a.union(b) + tm.assert_index_equal(result, expected) + + result = b.union(a) + tm.assert_index_equal(result, expected) + + +def test_union_int_categorical_with_nan(): + ci = CategoricalIndex([1, 2, np.nan]) + assert ci.categories.dtype.kind == "i" + + idx = Index([1, 2]) + + result = idx.union(ci) + expected = Index([1, 2, np.nan], dtype=np.float64) + tm.assert_index_equal(result, expected) + + result = ci.union(idx) + tm.assert_index_equal(result, expected) + + +class TestSetOpsUnsorted: + # These may eventually belong in a dtype-specific test_setops, or + # parametrized over a more general fixture + def test_intersect_str_dates(self): + dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] + + index1 = Index(dt_dates, dtype=object) + index2 = Index(["aa"], dtype=object) + result = index2.intersection(index1) + + expected = Index([], dtype=object) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_intersection(self, index, sort): + first = index[:20] + second = index[:10] + intersect = first.intersection(second, sort=sort) + if sort in (None, False): + tm.assert_index_equal(intersect.sort_values(), second.sort_values()) + else: + tm.assert_index_equal(intersect, second) + + # Corner cases + inter = first.intersection(first, sort=sort) + assert inter is first + + @pytest.mark.parametrize( + "index2,keeps_name", + [ + (Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name + (Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names + (Index([3, 4, 5, 6, 7]), False), + ], + ) + def test_intersection_name_preservation(self, index2, keeps_name, sort): + index1 = Index([1, 2, 3, 4, 5], name="index") + expected = Index([3, 4, 5]) + result = index1.intersection(index2, sort) + + if keeps_name: + expected.name = "index" + + assert result.name == expected.name + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + @pytest.mark.parametrize( + "first_name,second_name,expected_name", + [("A", "A", "A"), ("A", "B", None), (None, "B", None)], + ) + def test_intersection_name_preservation2( + self, index, first_name, second_name, expected_name, sort + ): + first = index[5:20] + second = index[:10] + first.name = first_name + second.name = second_name + intersect = first.intersection(second, sort=sort) + assert intersect.name == expected_name + + def test_chained_union(self, sort): + # Chained unions handle names correctly + i1 = Index([1, 2], name="i1") + i2 = Index([5, 6], name="i2") + i3 = Index([3, 4], name="i3") + union = i1.union(i2.union(i3, sort=sort), sort=sort) + expected = i1.union(i2, sort=sort).union(i3, sort=sort) + tm.assert_index_equal(union, expected) + + j1 = Index([1, 2], name="j1") + j2 = Index([],
name="j2") + j3 = Index([], name="j3") + union = j1.union(j2.union(j3, sort=sort), sort=sort) + expected = j1.union(j2, sort=sort).union(j3, sort=sort) + tm.assert_index_equal(union, expected) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_union(self, index, sort): + first = index[5:20] + second = index[:10] + everything = index[:20] + + union = first.union(second, sort=sort) + if sort in (None, False): + tm.assert_index_equal(union.sort_values(), everything.sort_values()) + else: + tm.assert_index_equal(union, everything) + + @pytest.mark.parametrize("klass", [np.array, Series, list]) + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_union_from_iterables(self, index, klass, sort): + # GH#10149 + first = index[5:20] + second = index[:10] + everything = index[:20] + + case = klass(second.values) + result = first.union(case, sort=sort) + if sort in (None, False): + tm.assert_index_equal(result.sort_values(), everything.sort_values()) + else: + tm.assert_index_equal(result, everything) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_union_identity(self, index, sort): + first = index[5:20] + + union = first.union(first, sort=sort) + # i.e. identity is not preserved when sort is True + assert (union is first) is (not sort) + + # This should no longer be the same object, since [] is not consistent, + # both objects will be recast to dtype('O') + union = first.union(Index([], dtype=first.dtype), sort=sort) + assert (union is first) is (not sort) + + union = Index([], dtype=first.dtype).union(first, sort=sort) + assert (union is first) is (not sort) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + @pytest.mark.parametrize("second_name,expected", [(None, None), ("name", "name")]) + def test_difference_name_preservation(self, index, second_name, expected, sort): + first = index[5:20] + second = index[:10] + answer = index[10:20] + + first.name = "name" + second.name = second_name + result = first.difference(second, sort=sort) + + if sort is True: + tm.assert_index_equal(result, answer) + else: + answer.name = second_name + tm.assert_index_equal(result.sort_values(), answer.sort_values()) + + if expected is None: + assert result.name is None + else: + assert result.name == expected + + def test_difference_empty_arg(self, index, sort): + first = index.copy() + first = first[5:20] + first.name = "name" + result = first.difference([], sort) + expected = index[5:20].unique() + expected.name = "name" + tm.assert_index_equal(result, expected) + + def test_difference_should_not_compare(self): + # GH 55113 + left = Index([1, 1]) + right = Index([True]) + result = left.difference(right) + expected = Index([1]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_difference_identity(self, index, sort): + first = index[5:20] + first.name = "name" + result = first.difference(first, sort) + + assert len(result) == 0 + assert result.name == first.name + + @pytest.mark.parametrize("index", ["string"], indirect=True) + def test_difference_sort(self, index, sort): + first = index[5:20] + second = index[:10] + + result = first.difference(second, sort) + expected = index[10:20] + + if sort is None: + expected = expected.sort_values() + + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("opname", ["difference", "symmetric_difference"]) + def test_difference_incomparable(self, opname): + a = Index([3, Timestamp("2000"), 1]) + b = Index([2, 
Timestamp("1999"), 1]) + op = operator.methodcaller(opname, b) + + with tm.assert_produces_warning(RuntimeWarning): + # sort=None, the default + result = op(a) + expected = Index([3, Timestamp("2000"), 2, Timestamp("1999")]) + if opname == "difference": + expected = expected[:2] + tm.assert_index_equal(result, expected) + + # sort=False + op = operator.methodcaller(opname, b, sort=False) + result = op(a) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("opname", ["difference", "symmetric_difference"]) + def test_difference_incomparable_true(self, opname): + a = Index([3, Timestamp("2000"), 1]) + b = Index([2, Timestamp("1999"), 1]) + op = operator.methodcaller(opname, b, sort=True) + + msg = "'<' not supported between instances of 'Timestamp' and 'int'" + with pytest.raises(TypeError, match=msg): + op(a) + + def test_symmetric_difference_mi(self, sort): + index1 = MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])) + index2 = MultiIndex.from_tuples([("foo", 1), ("bar", 3)]) + result = index1.symmetric_difference(index2, sort=sort) + expected = MultiIndex.from_tuples([("bar", 2), ("baz", 3), ("bar", 3)]) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "index2,expected", + [ + (Index([0, 1, np.nan]), Index([2.0, 3.0, 0.0])), + (Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0])), + ], + ) + def test_symmetric_difference_missing(self, index2, expected, sort): + # GH#13514 change: {nan} - {nan} == {} + # (GH#6444, sorting of nans, is no longer an issue) + index1 = Index([1, np.nan, 2, 3]) + + result = index1.symmetric_difference(index2, sort=sort) + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + def test_symmetric_difference_non_index(self, sort): + index1 = Index([1, 2, 3, 4], name="index1") + index2 = np.array([2, 3, 4, 5]) + expected = Index([1, 5], name="index1") + result = index1.symmetric_difference(index2, sort=sort) + if sort in (None, True): + tm.assert_index_equal(result, expected) + else: + tm.assert_index_equal(result.sort_values(), expected) + assert result.name == "index1" + + result = index1.symmetric_difference(index2, result_name="new_name", sort=sort) + expected.name = "new_name" + if sort in (None, True): + tm.assert_index_equal(result, expected) + else: + tm.assert_index_equal(result.sort_values(), expected) + assert result.name == "new_name" + + def test_union_ea_dtypes(self, any_numeric_ea_and_arrow_dtype): + # GH#51365 + idx = Index([1, 2, 3], dtype=any_numeric_ea_and_arrow_dtype) + idx2 = Index([3, 4, 5], dtype=any_numeric_ea_and_arrow_dtype) + result = idx.union(idx2) + expected = Index([1, 2, 3, 4, 5], dtype=any_numeric_ea_and_arrow_dtype) + tm.assert_index_equal(result, expected) + + def test_union_string_array(self, any_string_dtype): + idx1 = Index(["a"], dtype=any_string_dtype) + idx2 = Index(["b"], dtype=any_string_dtype) + result = idx1.union(idx2) + expected = Index(["a", "b"], dtype=any_string_dtype) + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_subclass.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_subclass.py new file mode 100644 index 0000000000000000000000000000000000000000..c3287e1ddcddcedc14857f2299798d3957830921 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/test_subclass.py @@ -0,0 +1,40 @@ +""" +Tests involving custom Index subclasses +""" +import numpy as np + +from pandas import 
( + DataFrame, + Index, +) +import pandas._testing as tm + + +class CustomIndex(Index): + def __new__(cls, data, name=None): + # assert that this index class cannot hold strings + if any(isinstance(val, str) for val in data): + raise TypeError("CustomIndex cannot hold strings") + + if name is None and hasattr(data, "name"): + name = data.name + data = np.array(data, dtype="O") + + return cls._simple_new(data, name) + + +def test_insert_fallback_to_base_index(): + # https://github.com/pandas-dev/pandas/issues/47071 + + idx = CustomIndex([1, 2, 3]) + result = idx.insert(0, "string") + expected = Index(["string", 1, 2, 3], dtype=object) + tm.assert_index_equal(result, expected) + + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 3)), + columns=idx, + index=Index([1, 2], name="string"), + ) + result = df.reset_index() + tm.assert_index_equal(result.columns, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79ee5ea6d3cb076e4fafdbfb2d5d8f47dc3a735d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_crosstab.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_crosstab.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96976731b380de8fce106b32b1209797c718fc60 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_crosstab.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_cut.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_cut.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04d4556bdd26536bdee4ccf96291b041da1b3262 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_cut.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_from_dummies.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_from_dummies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..178f0d87f1ffceee9411e67999df618ab4e6becc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_from_dummies.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_get_dummies.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_get_dummies.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..855a529dd3d54701f06498dc0d6e64fee94f7642 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_get_dummies.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_melt.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_melt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c530c43711ea2adfdf08367c5e4f0bcc019f6f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_melt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6a0b6f47c467dde902fa490f9b36a6a97cc8e1c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot_multilevel.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot_multilevel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1790c114c88301dafe2ba3307cca2e0ffcf3c666 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_pivot_multilevel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_qcut.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_qcut.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d90a325cf6baae61d0b53b67738c6b5649e323db Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_qcut.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_union_categoricals.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_union_categoricals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9954334e9264ab4c9484b921eafe4bbdc675e6ca Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_union_categoricals.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..513a5979ec1166267ad1c07e7bed80cdb66e8724 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/__pycache__/test_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaf6fb89d9bc7546f009e4918532c84b3830e15d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca2f8b1075baf490c0b304ef5ec989d9a7bc316a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f09cc1afe4dc338b2d955bf64f59bf3a8d3b3f50 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba829a7886f169444d09f2bb170f47b441512c86 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_append_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f5135d4958b48e2f9b03cd0aaedde3b99cfaf3a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_categorical.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95286f55c91e75d1f3260f159a0775b7c4e285f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_concat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be1d54d7aec0e073a31c6c77a86d2f305312f5cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_dataframe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e1d31f9c8f9e2d1b777979fb60a637a65efa530 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_datetimes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75b1bd6665ca7861ed1e821153a43290a972973d Binary files /dev/null 
and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_empty.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72e77bb770e35b1b93be09d1f6e8911da01efed0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_index.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b907bf3291542a52400923a031ab3956dbe5b63 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_invalid.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e2fe32988bd0899515116913b40f03c02e0da06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_series.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8cdee011c1f80b0e2c15099466371e2a014b5593 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/__pycache__/test_sort.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..62b8c59ba8855b2ef4273a33b475b2e16b7b3d1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/conftest.py @@ -0,0 +1,7 @@ +import pytest + + +@pytest.fixture(params=[True, False]) +def sort(request): + """Boolean sort keyword for concat and DataFrame.append.""" + return request.param diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py new file mode 100644 index 0000000000000000000000000000000000000000..81ca227fb7afb9768909f3cca7907fdaf74c430a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append.py @@ -0,0 +1,389 @@ +import datetime as dt +from itertools import combinations + +import dateutil +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + Timestamp, + concat, + isna, +) +import pandas._testing as tm + + +class TestAppend: + def test_append(self, sort, float_frame): + mixed_frame = float_frame.copy() + mixed_frame["foo"] = "bar" + + begin_index = float_frame.index[:5] + end_index = float_frame.index[5:] + + begin_frame = float_frame.reindex(begin_index) + end_frame = float_frame.reindex(end_index) + + appended = begin_frame._append(end_frame) + 
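# begin_frame and end_frame hold the first five rows and the remaining + # rows of float_frame, so appending them back together must reproduce + # column "A" exactly. +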
tm.assert_almost_equal(appended["A"], float_frame["A"]) + + del end_frame["A"] + partial_appended = begin_frame._append(end_frame, sort=sort) + assert "A" in partial_appended + + partial_appended = end_frame._append(begin_frame, sort=sort) + assert "A" in partial_appended + + # mixed type handling + appended = mixed_frame[:5]._append(mixed_frame[5:]) + tm.assert_frame_equal(appended, mixed_frame) + + # what to test here + mixed_appended = mixed_frame[:5]._append(float_frame[5:], sort=sort) + mixed_appended2 = float_frame[:5]._append(mixed_frame[5:], sort=sort) + + # all equal except 'foo' column + tm.assert_frame_equal( + mixed_appended.reindex(columns=["A", "B", "C", "D"]), + mixed_appended2.reindex(columns=["A", "B", "C", "D"]), + ) + + def test_append_empty(self, float_frame): + empty = DataFrame() + + appended = float_frame._append(empty) + tm.assert_frame_equal(float_frame, appended) + assert appended is not float_frame + + appended = empty._append(float_frame) + tm.assert_frame_equal(float_frame, appended) + assert appended is not float_frame + + def test_append_overlap_raises(self, float_frame): + msg = "Indexes have overlapping values" + with pytest.raises(ValueError, match=msg): + float_frame._append(float_frame, verify_integrity=True) + + def test_append_new_columns(self): + # see gh-6129: new columns + df = DataFrame({"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}}) + row = Series([5, 6, 7], index=["a", "b", "c"], name="z") + expected = DataFrame( + { + "a": {"x": 1, "y": 2, "z": 5}, + "b": {"x": 3, "y": 4, "z": 6}, + "c": {"z": 7}, + } + ) + result = df._append(row) + tm.assert_frame_equal(result, expected) + + def test_append_length0_frame(self, sort): + df = DataFrame(columns=["A", "B", "C"]) + df3 = DataFrame(index=[0, 1], columns=["A", "B"]) + df5 = df._append(df3, sort=sort) + + expected = DataFrame(index=[0, 1], columns=["A", "B", "C"]) + tm.assert_frame_equal(df5, expected) + + def test_append_records(self): + arr1 = np.zeros((2,), dtype=("i4,f4,S10")) + arr1[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")] + + arr2 = np.zeros((3,), dtype=("i4,f4,S10")) + arr2[:] = [(3, 4.0, "foo"), (5, 6.0, "bar"), (7.0, 8.0, "baz")] + + df1 = DataFrame(arr1) + df2 = DataFrame(arr2) + + result = df1._append(df2, ignore_index=True) + expected = DataFrame(np.concatenate((arr1, arr2))) + tm.assert_frame_equal(result, expected) + + # rewrite sort fixture, since we also want to test default of None + def test_append_sorts(self, sort): + df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"]) + df2 = DataFrame({"a": [1, 2], "c": [3, 4]}, index=[2, 3]) + + result = df1._append(df2, sort=sort) + + # for None / True + expected = DataFrame( + {"b": [1, 2, None, None], "a": [1, 2, 1, 2], "c": [None, None, 3, 4]}, + columns=["a", "b", "c"], + ) + if sort is False: + expected = expected[["b", "a", "c"]] + tm.assert_frame_equal(result, expected) + + def test_append_different_columns(self, sort): + df = DataFrame( + { + "bools": np.random.default_rng(2).standard_normal(10) > 0, + "ints": np.random.default_rng(2).integers(0, 10, 10), + "floats": np.random.default_rng(2).standard_normal(10), + "strings": ["foo", "bar"] * 5, + } + ) + + a = df[:5].loc[:, ["bools", "ints", "floats"]] + b = df[5:].loc[:, ["strings", "ints", "floats"]] + + appended = a._append(b, sort=sort) + assert isna(appended["strings"][0:4]).all() + assert isna(appended["bools"][5:]).all() + + def test_append_many(self, sort, float_frame): + chunks = [ + float_frame[:5], + float_frame[5:10], + float_frame[10:15], + float_frame[15:], + 
] + + result = chunks[0]._append(chunks[1:]) + tm.assert_frame_equal(result, float_frame) + + chunks[-1] = chunks[-1].copy() + chunks[-1]["foo"] = "bar" + result = chunks[0]._append(chunks[1:], sort=sort) + tm.assert_frame_equal(result.loc[:, float_frame.columns], float_frame) + assert (result["foo"][15:] == "bar").all() + assert result["foo"][:15].isna().all() + + def test_append_preserve_index_name(self): + # #980 + df1 = DataFrame(columns=["A", "B", "C"]) + df1 = df1.set_index(["A"]) + df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]], columns=["A", "B", "C"]) + df2 = df2.set_index(["A"]) + + msg = "The behavior of array concatenation with empty entries is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df1._append(df2) + assert result.index.name == "A" + + indexes_can_append = [ + pd.RangeIndex(3), + Index([4, 5, 6]), + Index([4.5, 5.5, 6.5]), + Index(list("abc")), + pd.CategoricalIndex("A B C".split()), + pd.CategoricalIndex("D E F".split(), ordered=True), + pd.IntervalIndex.from_breaks([7, 8, 9, 10]), + pd.DatetimeIndex( + [ + dt.datetime(2013, 1, 3, 0, 0), + dt.datetime(2013, 1, 3, 6, 10), + dt.datetime(2013, 1, 3, 7, 12), + ] + ), + pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()]), + ] + + @pytest.mark.parametrize( + "index", indexes_can_append, ids=lambda x: type(x).__name__ + ) + def test_append_same_columns_type(self, index): + # GH18359 + + # df wider than ser + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index) + ser_index = index[:2] + ser = Series([7, 8], index=ser_index, name=2) + result = df._append(ser) + expected = DataFrame( + [[1, 2, 3.0], [4, 5, 6], [7, 8, np.nan]], index=[0, 1, 2], columns=index + ) + # integer dtype is preserved for columns present in ser.index + assert expected.dtypes.iloc[0].kind == "i" + assert expected.dtypes.iloc[1].kind == "i" + + tm.assert_frame_equal(result, expected) + + # ser wider than df + ser_index = index + index = index[:2] + df = DataFrame([[1, 2], [4, 5]], columns=index) + ser = Series([7, 8, 9], index=ser_index, name=2) + result = df._append(ser) + expected = DataFrame( + [[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]], + index=[0, 1, 2], + columns=ser_index, + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "df_columns, series_index", + combinations(indexes_can_append, r=2), + ids=lambda x: type(x).__name__, + ) + def test_append_different_columns_types(self, df_columns, series_index): + # GH18359 + # See also test 'test_append_different_columns_types_raises' below + # for errors raised when appending + + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns) + ser = Series([7, 8, 9], index=series_index, name=2) + + result = df._append(ser) + idx_diff = ser.index.difference(df_columns) + combined_columns = Index(df_columns.tolist()).append(idx_diff) + expected = DataFrame( + [ + [1.0, 2.0, 3.0, np.nan, np.nan, np.nan], + [4, 5, 6, np.nan, np.nan, np.nan], + [np.nan, np.nan, np.nan, 7, 8, 9], + ], + index=[0, 1, 2], + columns=combined_columns, + ) + tm.assert_frame_equal(result, expected) + + def test_append_dtype_coerce(self, sort): + # GH 4993 + # appending with datetime will incorrectly convert datetime64 + + df1 = DataFrame( + index=[1, 2], + data=[dt.datetime(2013, 1, 1, 0, 0), dt.datetime(2013, 1, 2, 0, 0)], + columns=["start_time"], + ) + df2 = DataFrame( + index=[4, 5], + data=[ + [dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 3, 6, 10)], + [dt.datetime(2013, 1, 4, 0, 0), dt.datetime(2013, 1, 4, 7, 10)], + ], + columns=["start_time", 
"end_time"], + ) + + expected = concat( + [ + Series( + [ + pd.NaT, + pd.NaT, + dt.datetime(2013, 1, 3, 6, 10), + dt.datetime(2013, 1, 4, 7, 10), + ], + name="end_time", + ), + Series( + [ + dt.datetime(2013, 1, 1, 0, 0), + dt.datetime(2013, 1, 2, 0, 0), + dt.datetime(2013, 1, 3, 0, 0), + dt.datetime(2013, 1, 4, 0, 0), + ], + name="start_time", + ), + ], + axis=1, + sort=sort, + ) + result = df1._append(df2, ignore_index=True, sort=sort) + if sort: + expected = expected[["end_time", "start_time"]] + else: + expected = expected[["start_time", "end_time"]] + + tm.assert_frame_equal(result, expected) + + def test_append_missing_column_proper_upcast(self, sort): + df1 = DataFrame({"A": np.array([1, 2, 3, 4], dtype="i8")}) + df2 = DataFrame({"B": np.array([True, False, True, False], dtype=bool)}) + + appended = df1._append(df2, ignore_index=True, sort=sort) + assert appended["A"].dtype == "f8" + assert appended["B"].dtype == "O" + + def test_append_empty_frame_to_series_with_dateutil_tz(self): + # GH 23682 + date = Timestamp("2018-10-24 07:30:00", tz=dateutil.tz.tzutc()) + ser = Series({"a": 1.0, "b": 2.0, "date": date}) + df = DataFrame(columns=["c", "d"]) + result_a = df._append(ser, ignore_index=True) + expected = DataFrame( + [[np.nan, np.nan, 1.0, 2.0, date]], columns=["c", "d", "a", "b", "date"] + ) + # These columns get cast to object after append + expected["c"] = expected["c"].astype(object) + expected["d"] = expected["d"].astype(object) + tm.assert_frame_equal(result_a, expected) + + expected = DataFrame( + [[np.nan, np.nan, 1.0, 2.0, date]] * 2, columns=["c", "d", "a", "b", "date"] + ) + expected["c"] = expected["c"].astype(object) + expected["d"] = expected["d"].astype(object) + result_b = result_a._append(ser, ignore_index=True) + tm.assert_frame_equal(result_b, expected) + + result = df._append([ser, ser], ignore_index=True) + tm.assert_frame_equal(result, expected) + + def test_append_empty_tz_frame_with_datetime64ns(self, using_array_manager): + # https://github.com/pandas-dev/pandas/issues/35460 + df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") + + # pd.NaT gets inferred as tz-naive, so append result is tz-naive + result = df._append({"a": pd.NaT}, ignore_index=True) + if using_array_manager: + expected = DataFrame({"a": [pd.NaT]}, dtype=object) + else: + expected = DataFrame({"a": [np.nan]}, dtype=object) + tm.assert_frame_equal(result, expected) + + # also test with typed value to append + df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") + other = Series({"a": pd.NaT}, dtype="datetime64[ns]") + result = df._append(other, ignore_index=True) + tm.assert_frame_equal(result, expected) + + # mismatched tz + other = Series({"a": pd.NaT}, dtype="datetime64[ns, US/Pacific]") + result = df._append(other, ignore_index=True) + expected = DataFrame({"a": [pd.NaT]}).astype(object) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"] + ) + @pytest.mark.parametrize("val", [1, "NaT"]) + def test_append_empty_frame_with_timedelta64ns_nat( + self, dtype_str, val, using_array_manager + ): + # https://github.com/pandas-dev/pandas/issues/35460 + df = DataFrame(columns=["a"]).astype(dtype_str) + + other = DataFrame({"a": [np.timedelta64(val, "ns")]}) + result = df._append(other, ignore_index=True) + + expected = other.astype(object) + if isinstance(val, str) and dtype_str != "int64" and not using_array_manager: + # TODO: expected used to be `other.astype(object)` which is a more + # 
reasonable result. This was changed when tightening + # assert_frame_equal's treatment of mismatched NAs to match the + # existing behavior. + expected = DataFrame({"a": [np.nan]}, dtype=object) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"] + ) + @pytest.mark.parametrize("val", [1, "NaT"]) + def test_append_frame_with_timedelta64ns_nat(self, dtype_str, val): + # https://github.com/pandas-dev/pandas/issues/35460 + df = DataFrame({"a": pd.array([1], dtype=dtype_str)}) + + other = DataFrame({"a": [np.timedelta64(val, "ns")]}) + result = df._append(other, ignore_index=True) + + expected = DataFrame({"a": [df.iloc[0, 0], other.iloc[0, 0]]}, dtype=object) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py new file mode 100644 index 0000000000000000000000000000000000000000..31c3ef317622214efe302aae4b87a6bb6592a965 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_append_common.py @@ -0,0 +1,753 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Index, + Series, +) +import pandas._testing as tm + + +@pytest.fixture( + params=list( + { + "bool": [True, False, True], + "int64": [1, 2, 3], + "float64": [1.1, np.nan, 3.3], + "category": Categorical(["X", "Y", "Z"]), + "object": ["a", "b", "c"], + "datetime64[ns]": [ + pd.Timestamp("2011-01-01"), + pd.Timestamp("2011-01-02"), + pd.Timestamp("2011-01-03"), + ], + "datetime64[ns, US/Eastern]": [ + pd.Timestamp("2011-01-01", tz="US/Eastern"), + pd.Timestamp("2011-01-02", tz="US/Eastern"), + pd.Timestamp("2011-01-03", tz="US/Eastern"), + ], + "timedelta64[ns]": [ + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Timedelta("3 days"), + ], + "period[M]": [ + pd.Period("2011-01", freq="M"), + pd.Period("2011-02", freq="M"), + pd.Period("2011-03", freq="M"), + ], + }.items() + ) +) +def item(request): + key, data = request.param + return key, data + + +@pytest.fixture +def item2(item): + return item + + +class TestConcatAppendCommon: + """ + Test common dtype coercion rules between concat and append. 
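+ + The ``item`` and ``item2`` fixtures supply (dtype, values) pairs covering + bool, int64, float64, category, object, datetime64 (naive and tz-aware), + timedelta64 and period data; the tests below check that Index.append, + Series._append and pd.concat all agree on the resulting dtype.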
+ """ + + def test_dtypes(self, item, index_or_series, using_infer_string): + # to confirm test case covers intended dtypes + typ, vals = item + obj = index_or_series(vals) + if typ == "object" and using_infer_string: + typ = "string" + if isinstance(obj, Index): + assert obj.dtype == typ + elif isinstance(obj, Series): + if typ.startswith("period"): + assert obj.dtype == "Period[M]" + else: + assert obj.dtype == typ + + def test_concatlike_same_dtypes(self, item): + # GH 13660 + typ1, vals1 = item + + vals2 = vals1 + vals3 = vals1 + + if typ1 == "category": + exp_data = Categorical(list(vals1) + list(vals2)) + exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3)) + else: + exp_data = vals1 + vals2 + exp_data3 = vals1 + vals2 + vals3 + + # ----- Index ----- # + + # index.append + res = Index(vals1).append(Index(vals2)) + exp = Index(exp_data) + tm.assert_index_equal(res, exp) + + # 3 elements + res = Index(vals1).append([Index(vals2), Index(vals3)]) + exp = Index(exp_data3) + tm.assert_index_equal(res, exp) + + # index.append name mismatch + i1 = Index(vals1, name="x") + i2 = Index(vals2, name="y") + res = i1.append(i2) + exp = Index(exp_data) + tm.assert_index_equal(res, exp) + + # index.append name match + i1 = Index(vals1, name="x") + i2 = Index(vals2, name="x") + res = i1.append(i2) + exp = Index(exp_data, name="x") + tm.assert_index_equal(res, exp) + + # cannot append non-index + with pytest.raises(TypeError, match="all inputs must be Index"): + Index(vals1).append(vals2) + + with pytest.raises(TypeError, match="all inputs must be Index"): + Index(vals1).append([Index(vals2), vals3]) + + # ----- Series ----- # + + # series.append + res = Series(vals1)._append(Series(vals2), ignore_index=True) + exp = Series(exp_data) + tm.assert_series_equal(res, exp, check_index_type=True) + + # concat + res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True) + tm.assert_series_equal(res, exp, check_index_type=True) + + # 3 elements + res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True) + exp = Series(exp_data3) + tm.assert_series_equal(res, exp) + + res = pd.concat( + [Series(vals1), Series(vals2), Series(vals3)], + ignore_index=True, + ) + tm.assert_series_equal(res, exp) + + # name mismatch + s1 = Series(vals1, name="x") + s2 = Series(vals2, name="y") + res = s1._append(s2, ignore_index=True) + exp = Series(exp_data) + tm.assert_series_equal(res, exp, check_index_type=True) + + res = pd.concat([s1, s2], ignore_index=True) + tm.assert_series_equal(res, exp, check_index_type=True) + + # name match + s1 = Series(vals1, name="x") + s2 = Series(vals2, name="x") + res = s1._append(s2, ignore_index=True) + exp = Series(exp_data, name="x") + tm.assert_series_equal(res, exp, check_index_type=True) + + res = pd.concat([s1, s2], ignore_index=True) + tm.assert_series_equal(res, exp, check_index_type=True) + + # cannot append non-index + msg = ( + r"cannot concatenate object of type '.+'; " + "only Series and DataFrame objs are valid" + ) + with pytest.raises(TypeError, match=msg): + Series(vals1)._append(vals2) + + with pytest.raises(TypeError, match=msg): + Series(vals1)._append([Series(vals2), vals3]) + + with pytest.raises(TypeError, match=msg): + pd.concat([Series(vals1), vals2]) + + with pytest.raises(TypeError, match=msg): + pd.concat([Series(vals1), Series(vals2), vals3]) + + def test_concatlike_dtypes_coercion(self, item, item2, request): + # GH 13660 + typ1, vals1 = item + typ2, vals2 = item2 + + vals3 = vals2 + + # basically infer + exp_index_dtype = 
None + exp_series_dtype = None + + if typ1 == typ2: + pytest.skip("same dtype is tested in test_concatlike_same_dtypes") + elif typ1 == "category" or typ2 == "category": + pytest.skip("categorical type tested elsewhere") + + # specify expected dtype + if typ1 == "bool" and typ2 in ("int64", "float64"): + # series coerces to numeric based on numpy rule + # index doesn't because bool is object dtype + exp_series_dtype = typ2 + mark = pytest.mark.xfail(reason="GH#39187 casting to object") + request.applymarker(mark) + elif typ2 == "bool" and typ1 in ("int64", "float64"): + exp_series_dtype = typ1 + mark = pytest.mark.xfail(reason="GH#39187 casting to object") + request.applymarker(mark) + elif typ1 in {"datetime64[ns, US/Eastern]", "timedelta64[ns]"} or typ2 in { + "datetime64[ns, US/Eastern]", + "timedelta64[ns]", + }: + exp_index_dtype = object + exp_series_dtype = object + + exp_data = vals1 + vals2 + exp_data3 = vals1 + vals2 + vals3 + + # ----- Index ----- # + + # index.append + # GH#39817 + res = Index(vals1).append(Index(vals2)) + exp = Index(exp_data, dtype=exp_index_dtype) + tm.assert_index_equal(res, exp) + + # 3 elements + res = Index(vals1).append([Index(vals2), Index(vals3)]) + exp = Index(exp_data3, dtype=exp_index_dtype) + tm.assert_index_equal(res, exp) + + # ----- Series ----- # + + # series._append + # GH#39817 + res = Series(vals1)._append(Series(vals2), ignore_index=True) + exp = Series(exp_data, dtype=exp_series_dtype) + tm.assert_series_equal(res, exp, check_index_type=True) + + # concat + # GH#39817 + res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True) + tm.assert_series_equal(res, exp, check_index_type=True) + + # 3 elements + # GH#39817 + res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True) + exp = Series(exp_data3, dtype=exp_series_dtype) + tm.assert_series_equal(res, exp) + + # GH#39817 + res = pd.concat( + [Series(vals1), Series(vals2), Series(vals3)], + ignore_index=True, + ) + tm.assert_series_equal(res, exp) + + def test_concatlike_common_coerce_to_pandas_object(self): + # GH 13626 + # result must be Timestamp/Timedelta, not datetime.datetime/timedelta + dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"]) + tdi = pd.TimedeltaIndex(["1 days", "2 days"]) + + exp = Index( + [ + pd.Timestamp("2011-01-01"), + pd.Timestamp("2011-01-02"), + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + ] + ) + + res = dti.append(tdi) + tm.assert_index_equal(res, exp) + assert isinstance(res[0], pd.Timestamp) + assert isinstance(res[-1], pd.Timedelta) + + dts = Series(dti) + tds = Series(tdi) + res = dts._append(tds) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + assert isinstance(res.iloc[0], pd.Timestamp) + assert isinstance(res.iloc[-1], pd.Timedelta) + + res = pd.concat([dts, tds]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + assert isinstance(res.iloc[0], pd.Timestamp) + assert isinstance(res.iloc[-1], pd.Timedelta) + + def test_concatlike_datetimetz(self, tz_aware_fixture): + tz = tz_aware_fixture + # GH 7795 + dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz) + dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz) + + exp = pd.DatetimeIndex( + ["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz + ) + + res = dti1.append(dti2) + tm.assert_index_equal(res, exp) + + dts1 = Series(dti1) + dts2 = Series(dti2) + res = dts1._append(dts2) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([dts1, dts2]) + tm.assert_series_equal(res, 
Series(exp, index=[0, 1, 0, 1])) + + @pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"]) + def test_concatlike_datetimetz_short(self, tz): + # GH#7795 + ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz) + ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz) + df1 = DataFrame(0, index=ix1, columns=["A", "B"]) + df2 = DataFrame(0, index=ix2, columns=["A", "B"]) + + exp_idx = pd.DatetimeIndex( + ["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"], + tz=tz, + ).as_unit("ns") + exp = DataFrame(0, index=exp_idx, columns=["A", "B"]) + + tm.assert_frame_equal(df1._append(df2), exp) + tm.assert_frame_equal(pd.concat([df1, df2]), exp) + + def test_concatlike_datetimetz_to_object(self, tz_aware_fixture): + tz = tz_aware_fixture + # GH 13660 + + # different tz coerces to object + dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz) + dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"]) + + exp = Index( + [ + pd.Timestamp("2011-01-01", tz=tz), + pd.Timestamp("2011-01-02", tz=tz), + pd.Timestamp("2012-01-01"), + pd.Timestamp("2012-01-02"), + ], + dtype=object, + ) + + res = dti1.append(dti2) + tm.assert_index_equal(res, exp) + + dts1 = Series(dti1) + dts2 = Series(dti2) + res = dts1._append(dts2) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([dts1, dts2]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + # different tz + dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific") + + exp = Index( + [ + pd.Timestamp("2011-01-01", tz=tz), + pd.Timestamp("2011-01-02", tz=tz), + pd.Timestamp("2012-01-01", tz="US/Pacific"), + pd.Timestamp("2012-01-02", tz="US/Pacific"), + ], + dtype=object, + ) + + res = dti1.append(dti3) + tm.assert_index_equal(res, exp) + + dts1 = Series(dti1) + dts3 = Series(dti3) + res = dts1._append(dts3) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([dts1, dts3]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + def test_concatlike_common_period(self): + # GH 13660 + pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") + pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M") + + exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M") + + res = pi1.append(pi2) + tm.assert_index_equal(res, exp) + + ps1 = Series(pi1) + ps2 = Series(pi2) + res = ps1._append(ps2) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([ps1, ps2]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + def test_concatlike_common_period_diff_freq_to_object(self): + # GH 13221 + pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") + pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D") + + exp = Index( + [ + pd.Period("2011-01", freq="M"), + pd.Period("2011-02", freq="M"), + pd.Period("2012-01-01", freq="D"), + pd.Period("2012-02-01", freq="D"), + ], + dtype=object, + ) + + res = pi1.append(pi2) + tm.assert_index_equal(res, exp) + + ps1 = Series(pi1) + ps2 = Series(pi2) + res = ps1._append(ps2) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([ps1, ps2]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + def test_concatlike_common_period_mixed_dt_to_object(self): + # GH 13221 + # different datetimelike + pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M") + tdi = pd.TimedeltaIndex(["1 days", "2 days"]) + exp = Index( + [ + pd.Period("2011-01", freq="M"), + pd.Period("2011-02", freq="M"), 
+ pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + ], + dtype=object, + ) + + res = pi1.append(tdi) + tm.assert_index_equal(res, exp) + + ps1 = Series(pi1) + tds = Series(tdi) + res = ps1._append(tds) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([ps1, tds]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + # inverse + exp = Index( + [ + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Period("2011-01", freq="M"), + pd.Period("2011-02", freq="M"), + ], + dtype=object, + ) + + res = tdi.append(pi1) + tm.assert_index_equal(res, exp) + + ps1 = Series(pi1) + tds = Series(tdi) + res = tds._append(ps1) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + res = pd.concat([tds, ps1]) + tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1])) + + def test_concat_categorical(self): + # GH 13524 + + # same categories -> category + s1 = Series([1, 2, np.nan], dtype="category") + s2 = Series([2, 1, 2], dtype="category") + + exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category") + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + # partially different categories => not-category + s1 = Series([3, 2], dtype="category") + s2 = Series([2, 1], dtype="category") + + exp = Series([3, 2, 2, 1]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + # completely different categories (same dtype) => not-category + s1 = Series([10, 11, np.nan], dtype="category") + s2 = Series([np.nan, 1, 3, 2], dtype="category") + + exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + def test_union_categorical_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19096 + a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"])) + b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"])) + result = pd.concat([a, b], ignore_index=True) + expected = Series( + Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"]) + ) + tm.assert_series_equal(result, expected) + + def test_concat_categorical_coercion(self): + # GH 13524 + + # category + not-category => not-category + s1 = Series([1, 2, np.nan], dtype="category") + s2 = Series([2, 1, 2]) + + exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + # result shouldn't be affected by 1st elem dtype + exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # all values are not in category => not-category + s1 = Series([3, 2], dtype="category") + s2 = Series([2, 1]) + + exp = Series([3, 2, 2, 1]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series([2, 1, 3, 2]) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # completely different categories => not-category + s1 = Series([10, 11, np.nan], dtype="category") + s2 = Series([1, 3, 2]) + + exp = Series([10, 11, 
np.nan, 1, 3, 2], dtype=np.float64) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # different dtype => not-category + s1 = Series([10, 11, np.nan], dtype="category") + s2 = Series(["a", "b", "c"]) + + exp = Series([10, 11, np.nan, "a", "b", "c"]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series(["a", "b", "c", 10, 11, np.nan]) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # if normal series only contains NaN-likes => not-category + s1 = Series([10, 11], dtype="category") + s2 = Series([np.nan, np.nan, np.nan]) + + exp = Series([10, 11, np.nan, np.nan, np.nan]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series([np.nan, np.nan, np.nan, 10, 11]) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + def test_concat_categorical_3elem_coercion(self): + # GH 13524 + + # mixed dtypes => not-category + s1 = Series([1, 2, np.nan], dtype="category") + s2 = Series([2, 1, 2], dtype="category") + s3 = Series([1, 2, 1, 2, np.nan]) + + exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float") + tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp) + tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp) + + exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float") + tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp) + + # values are all in either category => not-category + s1 = Series([4, 5, 6], dtype="category") + s2 = Series([1, 2, 3], dtype="category") + s3 = Series([1, 3, 4]) + + exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4]) + tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp) + tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp) + + exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3]) + tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp) + + # values are all in either category => not-category + s1 = Series([4, 5, 6], dtype="category") + s2 = Series([1, 2, 3], dtype="category") + s3 = Series([10, 11, 12]) + + exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12]) + tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp) + tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp) + + exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3]) + tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp) + + def test_concat_categorical_multi_coercion(self): + # GH 13524 + + s1 = Series([1, 3], dtype="category") + s2 = Series([3, 4], dtype="category") + s3 = Series([2, 3]) + s4 = Series([2, 2], dtype="category") + s5 = Series([1, np.nan]) + s6 = Series([1, 3, 2], dtype="category") + + # mixed dtype, values are all in categories => not-category + exp = Series([1, 3, 3, 4, 2, 3, 2, 2, 1, np.nan, 1, 
3, 2]) + res = pd.concat([s1, s2, s3, s4, s5, s6], ignore_index=True) + tm.assert_series_equal(res, exp) + res = s1._append([s2, s3, s4, s5, s6], ignore_index=True) + tm.assert_series_equal(res, exp) + + exp = Series([1, 3, 2, 1, np.nan, 2, 2, 2, 3, 3, 4, 1, 3]) + res = pd.concat([s6, s5, s4, s3, s2, s1], ignore_index=True) + tm.assert_series_equal(res, exp) + res = s6._append([s5, s4, s3, s2, s1], ignore_index=True) + tm.assert_series_equal(res, exp) + + def test_concat_categorical_ordered(self): + # GH 13524 + + s1 = Series(Categorical([1, 2, np.nan], ordered=True)) + s2 = Series(Categorical([2, 1, 2], ordered=True)) + + exp = Series(Categorical([1, 2, np.nan, 2, 1, 2], ordered=True)) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + exp = Series(Categorical([1, 2, np.nan, 2, 1, 2, 1, 2, np.nan], ordered=True)) + tm.assert_series_equal(pd.concat([s1, s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s1._append([s2, s1], ignore_index=True), exp) + + def test_concat_categorical_coercion_nan(self): + # GH 13524 + + # some edge cases + # category + not-category => not category + s1 = Series(np.array([np.nan, np.nan], dtype=np.float64), dtype="category") + s2 = Series([np.nan, 1]) + + exp = Series([np.nan, np.nan, np.nan, 1]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + s1 = Series([1, np.nan], dtype="category") + s2 = Series([np.nan, np.nan]) + + exp = Series([1, np.nan, np.nan, np.nan], dtype="float") + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + # mixed dtype, all nan-likes => not-category + s1 = Series([np.nan, np.nan], dtype="category") + s2 = Series([np.nan, np.nan]) + + exp = Series([np.nan, np.nan, np.nan, np.nan]) + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + # all category nan-likes => category + s1 = Series([np.nan, np.nan], dtype="category") + s2 = Series([np.nan, np.nan], dtype="category") + + exp = Series([np.nan, np.nan, np.nan, np.nan], dtype="category") + + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + def test_concat_categorical_empty(self): + # GH 13524 + + s1 = Series([], dtype="category") + s2 = Series([1, 2], dtype="category") + + msg = "The behavior of array concatenation with empty entries is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2) + tm.assert_series_equal(s1._append(s2, ignore_index=True), s2) + + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2) + tm.assert_series_equal(s2._append(s1, ignore_index=True), s2) + + s1 = Series([], dtype="category") + s2 = Series([], dtype="category") + + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2) + tm.assert_series_equal(s1._append(s2, ignore_index=True), s2) + + s1 = Series([], dtype="category") + s2 = Series([], dtype="object") + + # different dtype => not-category + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), s2) + 
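+ # the mirrored checks follow: _append matches concat, and swapping the + # operand order still yields the object-dtype result, so the empty + # categorical never determines the output dtype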
tm.assert_series_equal(s1._append(s2, ignore_index=True), s2) + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), s2) + tm.assert_series_equal(s2._append(s1, ignore_index=True), s2) + + s1 = Series([], dtype="category") + s2 = Series([np.nan, np.nan]) + + # empty Series is ignored + exp = Series([np.nan, np.nan]) + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) + tm.assert_series_equal(s1._append(s2, ignore_index=True), exp) + + with tm.assert_produces_warning(FutureWarning, match=msg): + tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp) + tm.assert_series_equal(s2._append(s1, ignore_index=True), exp) + + def test_categorical_concat_append(self): + cat = Categorical(["a", "b"], categories=["a", "b"]) + vals = [1, 2] + df = DataFrame({"cats": cat, "vals": vals}) + cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"]) + vals2 = [1, 2, 1, 2] + exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1])) + + tm.assert_frame_equal(pd.concat([df, df]), exp) + tm.assert_frame_equal(df._append(df), exp) + + # GH 13524 can concat different categories + cat3 = Categorical(["a", "b"], categories=["a", "b", "c"]) + vals3 = [1, 2] + df_different_categories = DataFrame({"cats": cat3, "vals": vals3}) + + res = pd.concat([df, df_different_categories], ignore_index=True) + exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]}) + tm.assert_frame_equal(res, exp) + + res = df._append(df_different_categories, ignore_index=True) + tm.assert_frame_equal(res, exp) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..bbaaf0abecfbdff32d5c7526e3c8c37c71e92cc9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_categorical.py @@ -0,0 +1,273 @@ +from datetime import datetime + +import numpy as np + +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Series, +) +import pandas._testing as tm + + +class TestCategoricalConcat: + def test_categorical_concat(self, sort): + # See GH 10177 + df1 = DataFrame( + np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"] + ) + + df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"]) + + cat_values = ["one", "one", "two", "one", "two", "two", "one"] + df2["h"] = Series(Categorical(cat_values)) + + res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort) + exp = DataFrame( + { + "a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12], + "b": [ + 1, + 4, + 7, + 10, + 13, + 16, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + ], + "c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13], + "h": [None] * 6 + cat_values, + } + ) + exp["h"] = exp["h"].astype(df2["h"].dtype) + tm.assert_frame_equal(res, exp) + + def test_categorical_concat_dtypes(self, using_infer_string): + # GH8143 + index = ["cat", "obj", "num"] + cat = Categorical(["a", "b", "c"]) + obj = Series(["a", "b", "c"]) + num = Series([1, 2, 3]) + df = pd.concat([Series(cat), obj, num], axis=1, keys=index) + + result = df.dtypes == ( + object if not using_infer_string else "string[pyarrow_numpy]" + ) + expected = Series([False, True, False], index=index) + tm.assert_series_equal(result, expected) + + result = df.dtypes == "int64" 
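+ # only the "num" column is int64; "cat" remains category and "obj" remains + # object (or string when infer_string is enabled), as asserted here and below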
+ expected = Series([False, False, True], index=index) + tm.assert_series_equal(result, expected) + + result = df.dtypes == "category" + expected = Series([True, False, False], index=index) + tm.assert_series_equal(result, expected) + + def test_concat_categoricalindex(self): + # GH 16111, categories that aren't lexsorted + categories = [9, 0, 1, 2, 3] + + a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories)) + b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories)) + c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories)) + + result = pd.concat([a, b, c], axis=1) + + exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories) + exp = DataFrame( + { + 0: [1, 1, np.nan, np.nan], + 1: [np.nan, 2, 2, np.nan], + 2: [np.nan, np.nan, 3, 3], + }, + columns=[0, 1, 2], + index=exp_idx, + ) + tm.assert_frame_equal(result, exp) + + def test_categorical_concat_preserve(self): + # GH 8641 series concat not preserving category dtype + # GH 13524 can concat different categories + s = Series(list("abc"), dtype="category") + s2 = Series(list("abd"), dtype="category") + + exp = Series(list("abcabd")) + res = pd.concat([s, s2], ignore_index=True) + tm.assert_series_equal(res, exp) + + exp = Series(list("abcabc"), dtype="category") + res = pd.concat([s, s], ignore_index=True) + tm.assert_series_equal(res, exp) + + exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category") + res = pd.concat([s, s]) + tm.assert_series_equal(res, exp) + + a = Series(np.arange(6, dtype="int64")) + b = Series(list("aabbca")) + + df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))}) + res = pd.concat([df2, df2]) + exp = DataFrame( + { + "A": pd.concat([a, a]), + "B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))), + } + ) + tm.assert_frame_equal(res, exp) + + def test_categorical_index_preserver(self): + a = Series(np.arange(6, dtype="int64")) + b = Series(list("aabbca")) + + df2 = DataFrame( + {"A": a, "B": b.astype(CategoricalDtype(list("cab")))} + ).set_index("B") + result = pd.concat([df2, df2]) + expected = DataFrame( + { + "A": pd.concat([a, a]), + "B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))), + } + ).set_index("B") + tm.assert_frame_equal(result, expected) + + # wrong categories -> uses concat_compat, which casts to object + df3 = DataFrame( + {"A": a, "B": Categorical(b, categories=list("abe"))} + ).set_index("B") + result = pd.concat([df2, df3]) + expected = pd.concat( + [ + df2.set_axis(df2.index.astype(object), axis=0), + df3.set_axis(df3.index.astype(object), axis=0), + ] + ) + tm.assert_frame_equal(result, expected) + + def test_concat_categorical_tz(self): + # GH-23816 + a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific")) + b = Series(["a", "b"], dtype="category") + result = pd.concat([a, b], ignore_index=True) + expected = Series( + [ + pd.Timestamp("2017-01-01", tz="US/Pacific"), + pd.Timestamp("2017-01-02", tz="US/Pacific"), + "a", + "b", + ] + ) + tm.assert_series_equal(result, expected) + + def test_concat_categorical_datetime(self): + # GH-39443 + df1 = DataFrame( + {"x": Series(datetime(2021, 1, 1), index=[0], dtype="category")} + ) + df2 = DataFrame( + {"x": Series(datetime(2021, 1, 2), index=[1], dtype="category")} + ) + + result = pd.concat([df1, df2]) + expected = DataFrame( + {"x": Series([datetime(2021, 1, 1), datetime(2021, 1, 2)])} + ) + + tm.assert_equal(result, expected) + + def test_concat_categorical_unchanged(self): + # GH-12007 + # test fix for when concat on 
categorical and float + # coerces dtype categorical -> float + df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A")) + ser = Series([0, 1, 2], index=[0, 1, 3], name="B") + result = pd.concat([df, ser], axis=1) + expected = DataFrame( + { + "A": Series(["a", "b", "c", np.nan], dtype="category"), + "B": Series([0, 1, np.nan, 2], dtype="float"), + } + ) + tm.assert_equal(result, expected) + + def test_categorical_concat_gh7864(self): + # GH 7864 + # make sure ordering is preserved + df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")}) + df["grade"] = Categorical(df["raw_grade"]) + df["grade"].cat.set_categories(["e", "a", "b"]) + + df1 = df[0:3] + df2 = df[3:] + + tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories) + tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories) + + dfx = pd.concat([df1, df2]) + tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories) + + dfa = df1._append(df2) + tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories) + + def test_categorical_index_upcast(self): + # GH 17629 + # test upcasting to object when concatenating on categorical indexes + # with non-identical categories + + a = DataFrame({"foo": [1, 2]}, index=Categorical(["foo", "bar"])) + b = DataFrame({"foo": [4, 3]}, index=Categorical(["baz", "bar"])) + + res = pd.concat([a, b]) + exp = DataFrame({"foo": [1, 2, 4, 3]}, index=["foo", "bar", "baz", "bar"]) + + tm.assert_equal(res, exp) + + a = Series([1, 2], index=Categorical(["foo", "bar"])) + b = Series([4, 3], index=Categorical(["baz", "bar"])) + + res = pd.concat([a, b]) + exp = Series([1, 2, 4, 3], index=["foo", "bar", "baz", "bar"]) + + tm.assert_equal(res, exp) + + def test_categorical_missing_from_one_frame(self): + # GH 25412 + df1 = DataFrame({"f1": [1, 2, 3]}) + df2 = DataFrame({"f1": [2, 3, 1], "f2": Series([4, 4, 4]).astype("category")}) + result = pd.concat([df1, df2], sort=True) + dtype = CategoricalDtype([4]) + expected = DataFrame( + { + "f1": [1, 2, 3, 2, 3, 1], + "f2": Categorical.from_codes([-1, -1, -1, 0, 0, 0], dtype=dtype), + }, + index=[0, 1, 2, 0, 1, 2], + ) + tm.assert_frame_equal(result, expected) + + def test_concat_categorical_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/24845 + + c1 = pd.CategoricalIndex(["a", "a"], categories=["a", "b"], ordered=False) + c2 = pd.CategoricalIndex(["b", "b"], categories=["b", "a"], ordered=False) + c3 = pd.CategoricalIndex( + ["a", "a", "b", "b"], categories=["a", "b"], ordered=False + ) + + df1 = DataFrame({"A": [1, 2]}, index=c1) + df2 = DataFrame({"A": [3, 4]}, index=c2) + + result = pd.concat((df1, df2)) + expected = DataFrame({"A": [1, 2, 3, 4]}, index=c3) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py new file mode 100644 index 0000000000000000000000000000000000000000..9e34d02091e698d2ace7acd74896d6e68c556252 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_concat.py @@ -0,0 +1,912 @@ +from collections import ( + abc, + deque, +) +from collections.abc import Iterator +from datetime import datetime +from decimal import Decimal + +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + 
MultiIndex, + PeriodIndex, + Series, + concat, + date_range, +) +import pandas._testing as tm +from pandas.core.arrays import SparseArray +from pandas.tests.extension.decimal import to_decimal + + +class TestConcatenate: + def test_append_concat(self): + # GH#1815 + d1 = date_range("12/31/1990", "12/31/1999", freq="YE-DEC") + d2 = date_range("12/31/2000", "12/31/2009", freq="YE-DEC") + + s1 = Series(np.random.default_rng(2).standard_normal(10), d1) + s2 = Series(np.random.default_rng(2).standard_normal(10), d2) + + s1 = s1.to_period() + s2 = s2.to_period() + + # drops index + result = concat([s1, s2]) + assert isinstance(result.index, PeriodIndex) + assert result.index[0] == s1.index[0] + + def test_concat_copy(self, using_array_manager, using_copy_on_write): + df = DataFrame(np.random.default_rng(2).standard_normal((4, 3))) + df2 = DataFrame(np.random.default_rng(2).integers(0, 10, size=4).reshape(4, 1)) + df3 = DataFrame({5: "foo"}, index=range(4)) + + # These are actual copies. + result = concat([df, df2, df3], axis=1, copy=True) + + if not using_copy_on_write: + for arr in result._mgr.arrays: + assert not any( + np.shares_memory(arr, y) + for x in [df, df2, df3] + for y in x._mgr.arrays + ) + else: + for arr in result._mgr.arrays: + assert arr.base is not None + + # These are the same. + result = concat([df, df2, df3], axis=1, copy=False) + + for arr in result._mgr.arrays: + if arr.dtype.kind == "f": + assert arr.base is df._mgr.arrays[0].base + elif arr.dtype.kind in ["i", "u"]: + assert arr.base is df2._mgr.arrays[0].base + elif arr.dtype == object: + if using_array_manager: + # we get the same array object, which has no base + assert arr is df3._mgr.arrays[0] + else: + assert arr.base is not None + + # Float block was consolidated. + df4 = DataFrame(np.random.default_rng(2).standard_normal((4, 1))) + result = concat([df, df2, df3, df4], axis=1, copy=False) + for arr in result._mgr.arrays: + if arr.dtype.kind == "f": + if using_array_manager or using_copy_on_write: + # this is a view on some array in either df or df4 + assert any( + np.shares_memory(arr, other) + for other in df._mgr.arrays + df4._mgr.arrays + ) + else: + # the block was consolidated, so we got a copy anyway + assert arr.base is None + elif arr.dtype.kind in ["i", "u"]: + assert arr.base is df2._mgr.arrays[0].base + elif arr.dtype == object: + # this is a view on df3 + assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays) + + def test_concat_with_group_keys(self): + # axis=0 + df = DataFrame(np.random.default_rng(2).standard_normal((3, 4))) + df2 = DataFrame(np.random.default_rng(2).standard_normal((4, 4))) + + result = concat([df, df2], keys=[0, 1]) + exp_index = MultiIndex.from_arrays( + [[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]] + ) + expected = DataFrame(np.r_[df.values, df2.values], index=exp_index) + tm.assert_frame_equal(result, expected) + + result = concat([df, df], keys=[0, 1]) + exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]) + expected = DataFrame(np.r_[df.values, df.values], index=exp_index2) + tm.assert_frame_equal(result, expected) + + # axis=1 + df = DataFrame(np.random.default_rng(2).standard_normal((4, 3))) + df2 = DataFrame(np.random.default_rng(2).standard_normal((4, 4))) + + result = concat([df, df2], keys=[0, 1], axis=1) + expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index) + tm.assert_frame_equal(result, expected) + + result = concat([df, df], keys=[0, 1], axis=1) + expected = DataFrame(np.c_[df.values, df.values], 
columns=exp_index2) + tm.assert_frame_equal(result, expected) + + def test_concat_keys_specific_levels(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]] + level = ["three", "two", "one", "zero"] + result = concat( + pieces, + axis=1, + keys=["one", "two", "three"], + levels=[level], + names=["group_key"], + ) + + tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key")) + tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3])) + + assert result.columns.names == ["group_key", None] + + @pytest.mark.parametrize("mapping", ["mapping", "dict"]) + def test_concat_mapping(self, mapping, non_dict_mapping_subclass): + constructor = dict if mapping == "dict" else non_dict_mapping_subclass + frames = constructor( + { + "foo": DataFrame(np.random.default_rng(2).standard_normal((4, 3))), + "bar": DataFrame(np.random.default_rng(2).standard_normal((4, 3))), + "baz": DataFrame(np.random.default_rng(2).standard_normal((4, 3))), + "qux": DataFrame(np.random.default_rng(2).standard_normal((4, 3))), + } + ) + + sorted_keys = list(frames.keys()) + + result = concat(frames) + expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys) + tm.assert_frame_equal(result, expected) + + result = concat(frames, axis=1) + expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1) + tm.assert_frame_equal(result, expected) + + keys = ["baz", "foo", "bar"] + result = concat(frames, keys=keys) + expected = concat([frames[k] for k in keys], keys=keys) + tm.assert_frame_equal(result, expected) + + def test_concat_keys_and_levels(self): + df = DataFrame(np.random.default_rng(2).standard_normal((1, 3))) + df2 = DataFrame(np.random.default_rng(2).standard_normal((1, 4))) + + levels = [["foo", "baz"], ["one", "two"]] + names = ["first", "second"] + result = concat( + [df, df2, df, df2], + keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], + levels=levels, + names=names, + ) + expected = concat([df, df2, df, df2]) + exp_index = MultiIndex( + levels=levels + [[0]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]], + names=names + [None], + ) + expected.index = exp_index + + tm.assert_frame_equal(result, expected) + + # no names + result = concat( + [df, df2, df, df2], + keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], + levels=levels, + ) + assert result.index.names == (None,) * 3 + + # no levels + result = concat( + [df, df2, df, df2], + keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")], + names=["first", "second"], + ) + assert result.index.names == ("first", "second", None) + tm.assert_index_equal( + result.index.levels[0], Index(["baz", "foo"], name="first") + ) + + def test_concat_keys_levels_no_overlap(self): + # GH #1406 + df = DataFrame(np.random.default_rng(2).standard_normal((1, 3)), index=["a"]) + df2 = DataFrame(np.random.default_rng(2).standard_normal((1, 4)), index=["b"]) + + msg = "Values not found in passed level" + with pytest.raises(ValueError, match=msg): + concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]]) + + msg = "Key one not in level" + with pytest.raises(ValueError, match=msg): + concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]]) + + def test_crossed_dtypes_weird_corner(self): + columns = ["A", "B", "C", "D"] + df1 = DataFrame( + { + "A": np.array([1, 2, 3, 4], dtype="f8"), + "B": np.array([1, 2, 3, 4], dtype="i8"), + "C": np.array([1, 2, 3, 4], 
dtype="f8"), + "D": np.array([1, 2, 3, 4], dtype="i8"), + }, + columns=columns, + ) + + df2 = DataFrame( + { + "A": np.array([1, 2, 3, 4], dtype="i8"), + "B": np.array([1, 2, 3, 4], dtype="f8"), + "C": np.array([1, 2, 3, 4], dtype="i8"), + "D": np.array([1, 2, 3, 4], dtype="f8"), + }, + columns=columns, + ) + + appended = concat([df1, df2], ignore_index=True) + expected = DataFrame( + np.concatenate([df1.values, df2.values], axis=0), columns=columns + ) + tm.assert_frame_equal(appended, expected) + + df = DataFrame(np.random.default_rng(2).standard_normal((1, 3)), index=["a"]) + df2 = DataFrame(np.random.default_rng(2).standard_normal((1, 4)), index=["b"]) + result = concat([df, df2], keys=["one", "two"], names=["first", "second"]) + assert result.index.names == ("first", "second") + + def test_with_mixed_tuples(self, sort): + # 10697 + # columns have mixed tuples, so handle properly + df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2)) + df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2)) + + # it works + concat([df1, df2], sort=sort) + + def test_concat_mixed_objs_columns(self): + # Test column-wise concat for mixed series/frames (axis=1) + # G2385 + + index = date_range("01-Jan-2013", periods=10, freq="h") + arr = np.arange(10, dtype="int64") + s1 = Series(arr, index=index) + s2 = Series(arr, index=index) + df = DataFrame(arr.reshape(-1, 1), index=index) + + expected = DataFrame( + np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0] + ) + result = concat([df, df], axis=1) + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1] + ) + result = concat([s1, s2], axis=1) + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2] + ) + result = concat([s1, s2, s1], axis=1) + tm.assert_frame_equal(result, expected) + + expected = DataFrame( + np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3] + ) + result = concat([s1, df, s2, s2, s1], axis=1) + tm.assert_frame_equal(result, expected) + + # with names + s1.name = "foo" + expected = DataFrame( + np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0] + ) + result = concat([s1, df, s2], axis=1) + tm.assert_frame_equal(result, expected) + + s2.name = "bar" + expected = DataFrame( + np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"] + ) + result = concat([s1, df, s2], axis=1) + tm.assert_frame_equal(result, expected) + + # ignore index + expected = DataFrame( + np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2] + ) + result = concat([s1, df, s2], axis=1, ignore_index=True) + tm.assert_frame_equal(result, expected) + + def test_concat_mixed_objs_index(self): + # Test row-wise concat for mixed series/frames with a common name + # GH2385, GH15047 + + index = date_range("01-Jan-2013", periods=10, freq="h") + arr = np.arange(10, dtype="int64") + s1 = Series(arr, index=index) + s2 = Series(arr, index=index) + df = DataFrame(arr.reshape(-1, 1), index=index) + + expected = DataFrame( + np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0] + ) + result = concat([s1, df, s2]) + tm.assert_frame_equal(result, expected) + + def test_concat_mixed_objs_index_names(self): + # Test row-wise concat for mixed series/frames with distinct names + # GH2385, GH15047 + + index = date_range("01-Jan-2013", periods=10, freq="h") + arr = np.arange(10, dtype="int64") + s1 = Series(arr, index=index, name="foo") + 
s2 = Series(arr, index=index, name="bar") + df = DataFrame(arr.reshape(-1, 1), index=index) + + expected = DataFrame( + np.kron(np.where(np.identity(3) == 1, 1, np.nan), arr).T, + index=index.tolist() * 3, + columns=["foo", 0, "bar"], + ) + result = concat([s1, df, s2]) + tm.assert_frame_equal(result, expected) + + # Rename all series to 0 when ignore_index=True + expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0]) + result = concat([s1, df, s2], ignore_index=True) + tm.assert_frame_equal(result, expected) + + def test_dtype_coercion(self): + # 12411 + df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]}) + + result = concat([df.iloc[[0]], df.iloc[[1]]]) + tm.assert_series_equal(result.dtypes, df.dtypes) + + # 12045 + df = DataFrame({"date": [datetime(2012, 1, 1), datetime(1012, 1, 2)]}) + result = concat([df.iloc[[0]], df.iloc[[1]]]) + tm.assert_series_equal(result.dtypes, df.dtypes) + + # 11594 + df = DataFrame({"text": ["some words"] + [None] * 9}) + result = concat([df.iloc[[0]], df.iloc[[1]]]) + tm.assert_series_equal(result.dtypes, df.dtypes) + + def test_concat_single_with_key(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + + result = concat([df], keys=["foo"]) + expected = concat([df, df], keys=["foo", "bar"]) + tm.assert_frame_equal(result, expected[:10]) + + def test_concat_no_items_raises(self): + with pytest.raises(ValueError, match="No objects to concatenate"): + concat([]) + + def test_concat_exclude_none(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + + pieces = [df[:5], None, None, df[5:]] + result = concat(pieces) + tm.assert_frame_equal(result, df) + with pytest.raises(ValueError, match="All objects passed were None"): + concat([None, None]) + + def test_concat_keys_with_none(self): + # #1649 + df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]]) + + result = concat({"a": None, "b": df0, "c": df0[:2], "d": df0[:1], "e": df0}) + expected = concat({"b": df0, "c": df0[:2], "d": df0[:1], "e": df0}) + tm.assert_frame_equal(result, expected) + + result = concat( + [None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"] + ) + expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"]) + tm.assert_frame_equal(result, expected) + + def test_concat_bug_1719(self): + ts1 = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + ts2 = ts1.copy()[::2] + + # to join with union + # these two are of different length! 
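+ # a rough sketch of the expected alignment (ts2 keeps every other date of + # ts1, so the union index covers all 10 dates): + # >>> concat([ts1, ts2], join="outer", axis=1).shape + # (10, 2) # NaN in the ts2 column on the dates it lacks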
+ left = concat([ts1, ts2], join="outer", axis=1) + right = concat([ts2, ts1], join="outer", axis=1) + + assert len(left) == len(right) + + def test_concat_bug_2972(self): + ts0 = Series(np.zeros(5)) + ts1 = Series(np.ones(5)) + ts0.name = ts1.name = "same name" + result = concat([ts0, ts1], axis=1) + + expected = DataFrame({0: ts0, 1: ts1}) + expected.columns = ["same name", "same name"] + tm.assert_frame_equal(result, expected) + + def test_concat_bug_3602(self): + # GH 3602, duplicate columns + df1 = DataFrame( + { + "firmNo": [0, 0, 0, 0], + "prc": [6, 6, 6, 6], + "stringvar": ["rrr", "rrr", "rrr", "rrr"], + } + ) + df2 = DataFrame( + {"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]} + ) + expected = DataFrame( + [ + [0, 6, "rrr", 9, 1, 6], + [0, 6, "rrr", 10, 2, 6], + [0, 6, "rrr", 11, 3, 6], + [0, 6, "rrr", 12, 4, 6], + ] + ) + expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"] + + result = concat([df1, df2], axis=1) + tm.assert_frame_equal(result, expected) + + def test_concat_iterables(self): + # GH8645 check concat works with tuples, list, generators, and weird + # stuff like deque and custom iterables + df1 = DataFrame([1, 2, 3]) + df2 = DataFrame([4, 5, 6]) + expected = DataFrame([1, 2, 3, 4, 5, 6]) + tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected) + tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected) + tm.assert_frame_equal( + concat((df for df in (df1, df2)), ignore_index=True), expected + ) + tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected) + + class CustomIterator1: + def __len__(self) -> int: + return 2 + + def __getitem__(self, index): + try: + return {0: df1, 1: df2}[index] + except KeyError as err: + raise IndexError from err + + tm.assert_frame_equal(concat(CustomIterator1(), ignore_index=True), expected) + + class CustomIterator2(abc.Iterable): + def __iter__(self) -> Iterator: + yield df1 + yield df2 + + tm.assert_frame_equal(concat(CustomIterator2(), ignore_index=True), expected) + + def test_concat_order(self): + # GH 17344, GH#47331 + dfs = [DataFrame(index=range(3), columns=["a", 1, None])] + dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for _ in range(100)] + + result = concat(dfs, sort=True).columns + expected = Index([1, "a", None]) + tm.assert_index_equal(result, expected) + + def test_concat_different_extension_dtypes_upcasts(self): + a = Series(pd.array([1, 2], dtype="Int64")) + b = Series(to_decimal([1, 2])) + + result = concat([a, b], ignore_index=True) + expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object) + tm.assert_series_equal(result, expected) + + def test_concat_ordered_dict(self): + # GH 21510 + expected = concat( + [Series(range(3)), Series(range(4))], keys=["First", "Another"] + ) + result = concat({"First": Series(range(3)), "Another": Series(range(4))}) + tm.assert_series_equal(result, expected) + + def test_concat_duplicate_indices_raise(self): + # GH 45888: test raise for concat DataFrames with duplicate indices + # https://github.com/pandas-dev/pandas/issues/36263 + df1 = DataFrame( + np.random.default_rng(2).standard_normal(5), + index=[0, 1, 2, 3, 3], + columns=["a"], + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal(5), + index=[0, 1, 2, 2, 4], + columns=["b"], + ) + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + concat([df1, df2], axis=1) + + +def test_concat_no_unnecessary_upcast(float_numpy_dtype, frame_or_series): + # GH 
13247 + dims = frame_or_series(dtype=object).ndim + dt = float_numpy_dtype + + dfs = [ + frame_or_series(np.array([1], dtype=dt, ndmin=dims)), + frame_or_series(np.array([np.nan], dtype=dt, ndmin=dims)), + frame_or_series(np.array([5], dtype=dt, ndmin=dims)), + ] + x = concat(dfs) + assert x.values.dtype == dt + + +@pytest.mark.parametrize("pdt", [Series, DataFrame]) +def test_concat_will_upcast(pdt, any_signed_int_numpy_dtype): + dt = any_signed_int_numpy_dtype + dims = pdt().ndim + dfs = [ + pdt(np.array([1], dtype=dt, ndmin=dims)), + pdt(np.array([np.nan], ndmin=dims)), + pdt(np.array([5], dtype=dt, ndmin=dims)), + ] + x = concat(dfs) + assert x.values.dtype == "float64" + + +def test_concat_empty_and_non_empty_frame_regression(): + # GH 18178 regression test + df1 = DataFrame({"foo": [1]}) + df2 = DataFrame({"foo": []}) + expected = DataFrame({"foo": [1.0]}) + result = concat([df1, df2]) + tm.assert_frame_equal(result, expected) + + +def test_concat_sparse(): + # GH 23557 + a = Series(SparseArray([0, 1, 2])) + expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype( + pd.SparseDtype(np.int64, 0) + ) + result = concat([a, a], axis=1) + tm.assert_frame_equal(result, expected) + + +def test_concat_dense_sparse(): + # GH 30668 + dtype = pd.SparseDtype(np.float64, None) + a = Series(pd.arrays.SparseArray([1, None]), dtype=dtype) + b = Series([1], dtype=float) + expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(dtype) + result = concat([a, b], axis=0) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("keys", [["e", "f", "f"], ["f", "e", "f"]]) +def test_duplicate_keys(keys): + # GH 33654 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + s1 = Series([7, 8, 9], name="c") + s2 = Series([10, 11, 12], name="d") + result = concat([df, s1, s2], axis=1, keys=keys) + expected_values = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] + expected_columns = MultiIndex.from_tuples( + [(keys[0], "a"), (keys[0], "b"), (keys[1], "c"), (keys[2], "d")] + ) + expected = DataFrame(expected_values, columns=expected_columns) + tm.assert_frame_equal(result, expected) + + +def test_duplicate_keys_same_frame(): + # GH 43595 + keys = ["e", "e"] + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + result = concat([df, df], axis=1, keys=keys) + expected_values = [[1, 4, 1, 4], [2, 5, 2, 5], [3, 6, 3, 6]] + expected_columns = MultiIndex.from_tuples( + [(keys[0], "a"), (keys[0], "b"), (keys[1], "a"), (keys[1], "b")] + ) + expected = DataFrame(expected_values, columns=expected_columns) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" +) +@pytest.mark.parametrize( + "obj", + [ + tm.SubclassedDataFrame({"A": np.arange(0, 10)}), + tm.SubclassedSeries(np.arange(0, 10), name="A"), + ], +) +def test_concat_preserves_subclass(obj): + # GH28330 -- preserve subclass + + result = concat([obj, obj]) + assert isinstance(result, type(obj)) + + +def test_concat_frame_axis0_extension_dtypes(): + # preserve extension dtype (through common_dtype mechanism) + df1 = DataFrame({"a": pd.array([1, 2, 3], dtype="Int64")}) + df2 = DataFrame({"a": np.array([4, 5, 6])}) + + result = concat([df1, df2], ignore_index=True) + expected = DataFrame({"a": [1, 2, 3, 4, 5, 6]}, dtype="Int64") + tm.assert_frame_equal(result, expected) + + result = concat([df2, df1], ignore_index=True) + expected = DataFrame({"a": [4, 5, 6, 1, 2, 3]}, dtype="Int64") + tm.assert_frame_equal(result, expected) + + +def 
test_concat_preserves_extension_int64_dtype(): + # GH 24768 + df_a = DataFrame({"a": [-1]}, dtype="Int64") + df_b = DataFrame({"b": [1]}, dtype="Int64") + result = concat([df_a, df_b], ignore_index=True) + expected = DataFrame({"a": [-1, None], "b": [None, 1]}, dtype="Int64") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype1,dtype2,expected_dtype", + [ + ("bool", "bool", "bool"), + ("boolean", "bool", "boolean"), + ("bool", "boolean", "boolean"), + ("boolean", "boolean", "boolean"), + ], +) +def test_concat_bool_types(dtype1, dtype2, expected_dtype): + # GH 42800 + ser1 = Series([True, False], dtype=dtype1) + ser2 = Series([False, True], dtype=dtype2) + result = concat([ser1, ser2], ignore_index=True) + expected = Series([True, False, False, True], dtype=expected_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + ("keys", "integrity"), + [ + (["red"] * 3, True), + (["red"] * 3, False), + (["red", "blue", "red"], False), + (["red", "blue", "red"], True), + ], +) +def test_concat_repeated_keys(keys, integrity): + # GH: 20816 + series_list = [Series({"a": 1}), Series({"b": 2}), Series({"c": 3})] + result = concat(series_list, keys=keys, verify_integrity=integrity) + tuples = list(zip(keys, ["a", "b", "c"])) + expected = Series([1, 2, 3], index=MultiIndex.from_tuples(tuples)) + tm.assert_series_equal(result, expected) + + +def test_concat_null_object_with_dti(): + # GH#40841 + dti = pd.DatetimeIndex( + ["2021-04-08 21:21:14+00:00"], dtype="datetime64[ns, UTC]", name="Time (UTC)" + ) + right = DataFrame(data={"C": [0.5274]}, index=dti) + + idx = Index([None], dtype="object", name="Maybe Time (UTC)") + left = DataFrame(data={"A": [None], "B": [np.nan]}, index=idx) + + result = concat([left, right], axis="columns") + + exp_index = Index([None, dti[0]], dtype=object) + expected = DataFrame( + { + "A": np.array([None, np.nan], dtype=object), + "B": [np.nan, np.nan], + "C": [np.nan, 0.5274], + }, + index=exp_index, + ) + tm.assert_frame_equal(result, expected) + + +def test_concat_multiindex_with_empty_rangeindex(): + # GH#41234 + mi = MultiIndex.from_tuples([("B", 1), ("C", 1)]) + df1 = DataFrame([[1, 2]], columns=mi) + df2 = DataFrame(index=[1], columns=pd.RangeIndex(0)) + + result = concat([df1, df2]) + expected = DataFrame([[1, 2], [np.nan, np.nan]], columns=mi) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "data", + [ + Series(data=[1, 2]), + DataFrame( + data={ + "col1": [1, 2], + } + ), + DataFrame(dtype=float), + Series(dtype=float), + ], +) +def test_concat_drop_attrs(data): + # GH#41828 + df1 = data.copy() + df1.attrs = {1: 1} + df2 = data.copy() + df2.attrs = {1: 2} + df = concat([df1, df2]) + assert len(df.attrs) == 0 + + +@pytest.mark.parametrize( + "data", + [ + Series(data=[1, 2]), + DataFrame( + data={ + "col1": [1, 2], + } + ), + DataFrame(dtype=float), + Series(dtype=float), + ], +) +def test_concat_retain_attrs(data): + # GH#41828 + df1 = data.copy() + df1.attrs = {1: 1} + df2 = data.copy() + df2.attrs = {1: 1} + df = concat([df1, df2]) + assert df.attrs[1] == 1 + + +@td.skip_array_manager_invalid_test +@pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"]) +@pytest.mark.parametrize("empty_dtype", [None, "float64", "object"]) +def test_concat_ignore_empty_object_float(empty_dtype, df_dtype): + # https://github.com/pandas-dev/pandas/issues/45637 + df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype) + empty = DataFrame(columns=["foo", "bar"], 
dtype=empty_dtype) + + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + warn = None + if df_dtype == "datetime64[ns]" or ( + df_dtype == "float64" and empty_dtype != "float64" + ): + warn = FutureWarning + with tm.assert_produces_warning(warn, match=msg): + result = concat([empty, df]) + expected = df + if df_dtype == "int64": + # TODO what exact behaviour do we want for integer eventually? + if empty_dtype == "float64": + expected = df.astype("float64") + else: + expected = df.astype("object") + tm.assert_frame_equal(result, expected) + + +@td.skip_array_manager_invalid_test +@pytest.mark.parametrize("df_dtype", ["float64", "int64", "datetime64[ns]"]) +@pytest.mark.parametrize("empty_dtype", [None, "float64", "object"]) +def test_concat_ignore_all_na_object_float(empty_dtype, df_dtype): + df = DataFrame({"foo": [1, 2], "bar": [1, 2]}, dtype=df_dtype) + empty = DataFrame({"foo": [np.nan], "bar": [np.nan]}, dtype=empty_dtype) + + if df_dtype == "int64": + # TODO what exact behaviour do we want for integer eventually? + if empty_dtype == "object": + df_dtype = "object" + else: + df_dtype = "float64" + + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + warn = None + if empty_dtype != df_dtype and empty_dtype is not None: + warn = FutureWarning + elif df_dtype == "datetime64[ns]": + warn = FutureWarning + + with tm.assert_produces_warning(warn, match=msg): + result = concat([empty, df], ignore_index=True) + + expected = DataFrame({"foo": [np.nan, 1, 2], "bar": [np.nan, 1, 2]}, dtype=df_dtype) + tm.assert_frame_equal(result, expected) + + +@td.skip_array_manager_invalid_test +def test_concat_ignore_empty_from_reindex(): + # https://github.com/pandas-dev/pandas/pull/43507#issuecomment-920375856 + df1 = DataFrame({"a": [1], "b": [pd.Timestamp("2012-01-01")]}) + df2 = DataFrame({"a": [2]}) + + aligned = df2.reindex(columns=df1.columns) + + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([df1, aligned], ignore_index=True) + expected = df1 = DataFrame({"a": [1, 2], "b": [pd.Timestamp("2012-01-01"), pd.NaT]}) + tm.assert_frame_equal(result, expected) + + +def test_concat_mismatched_keys_length(): + # GH#43485 + ser = Series(range(5)) + sers = [ser + n for n in range(4)] + keys = ["A", "B", "C"] + + msg = r"The behavior of pd.concat with len\(keys\) != len\(objs\) is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + concat(sers, keys=keys, axis=1) + with tm.assert_produces_warning(FutureWarning, match=msg): + concat(sers, keys=keys, axis=0) + with tm.assert_produces_warning(FutureWarning, match=msg): + concat((x for x in sers), keys=(y for y in keys), axis=1) + with tm.assert_produces_warning(FutureWarning, match=msg): + concat((x for x in sers), keys=(y for y in keys), axis=0) + + +def test_concat_multiindex_with_category(): + df1 = DataFrame( + { + "c1": Series(list("abc"), dtype="category"), + "c2": Series(list("eee"), dtype="category"), + "i2": Series([1, 2, 3]), + } + ) + df1 = df1.set_index(["c1", "c2"]) + df2 = DataFrame( + { + "c1": Series(list("abc"), dtype="category"), + "c2": Series(list("eee"), dtype="category"), + "i2": Series([4, 5, 6]), + } + ) + df2 = df2.set_index(["c1", "c2"]) + result = concat([df1, df2]) + expected = DataFrame( + { + "c1": Series(list("abcabc"), dtype="category"), + "c2": Series(list("eeeeee"), dtype="category"), + "i2": Series([1, 2, 3, 4, 5, 6]), + } + ) + expected = 
expected.set_index(["c1", "c2"]) + tm.assert_frame_equal(result, expected) + + +def test_concat_ea_upcast(): + # GH#54848 + df1 = DataFrame(["a"], dtype="string") + df2 = DataFrame([1], dtype="Int64") + result = concat([df1, df2]) + expected = DataFrame(["a", 1], index=[0, 0]) + tm.assert_frame_equal(result, expected) + + +def test_concat_none_with_timezone_timestamp(): + # GH#52093 + df1 = DataFrame([{"A": None}]) + df2 = DataFrame([{"A": pd.Timestamp("1990-12-20 00:00:00+00:00")}]) + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([df1, df2], ignore_index=True) + expected = DataFrame({"A": [None, pd.Timestamp("1990-12-20 00:00:00+00:00")]}) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py new file mode 100644 index 0000000000000000000000000000000000000000..f288921c25753463030cfe4f765b2709b676c858 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_dataframe.py @@ -0,0 +1,230 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + concat, +) +import pandas._testing as tm + + +class TestDataFrameConcat: + def test_concat_multiple_frames_dtypes(self): + # GH#2759 + df1 = DataFrame(data=np.ones((10, 2)), columns=["foo", "bar"], dtype=np.float64) + df2 = DataFrame(data=np.ones((10, 2)), dtype=np.float32) + results = concat((df1, df2), axis=1).dtypes + expected = Series( + [np.dtype("float64")] * 2 + [np.dtype("float32")] * 2, + index=["foo", "bar", 0, 1], + ) + tm.assert_series_equal(results, expected) + + def test_concat_tuple_keys(self): + # GH#14438 + df1 = DataFrame(np.ones((2, 2)), columns=list("AB")) + df2 = DataFrame(np.ones((3, 2)) * 2, columns=list("AB")) + results = concat((df1, df2), keys=[("bee", "bah"), ("bee", "boo")]) + expected = DataFrame( + { + "A": { + ("bee", "bah", 0): 1.0, + ("bee", "bah", 1): 1.0, + ("bee", "boo", 0): 2.0, + ("bee", "boo", 1): 2.0, + ("bee", "boo", 2): 2.0, + }, + "B": { + ("bee", "bah", 0): 1.0, + ("bee", "bah", 1): 1.0, + ("bee", "boo", 0): 2.0, + ("bee", "boo", 1): 2.0, + ("bee", "boo", 2): 2.0, + }, + } + ) + tm.assert_frame_equal(results, expected) + + def test_concat_named_keys(self): + # GH#14252 + df = DataFrame({"foo": [1, 2], "bar": [0.1, 0.2]}) + index = Index(["a", "b"], name="baz") + concatted_named_from_keys = concat([df, df], keys=index) + expected_named = DataFrame( + {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]}, + index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=["baz", None]), + ) + tm.assert_frame_equal(concatted_named_from_keys, expected_named) + + index_no_name = Index(["a", "b"], name=None) + concatted_named_from_names = concat([df, df], keys=index_no_name, names=["baz"]) + tm.assert_frame_equal(concatted_named_from_names, expected_named) + + concatted_unnamed = concat([df, df], keys=index_no_name) + expected_unnamed = DataFrame( + {"foo": [1, 2, 1, 2], "bar": [0.1, 0.2, 0.1, 0.2]}, + index=pd.MultiIndex.from_product((["a", "b"], [0, 1]), names=[None, None]), + ) + tm.assert_frame_equal(concatted_unnamed, expected_unnamed) + + def test_concat_axis_parameter(self): + # GH#14369 + df1 = DataFrame({"A": [0.1, 0.2]}, index=range(2)) + df2 = DataFrame({"A": [0.3, 0.4]}, index=range(2)) + + # Index/row/0 DataFrame + expected_index = DataFrame({"A": [0.1, 
0.2, 0.3, 0.4]}, index=[0, 1, 0, 1]) + + concatted_index = concat([df1, df2], axis="index") + tm.assert_frame_equal(concatted_index, expected_index) + + concatted_row = concat([df1, df2], axis="rows") + tm.assert_frame_equal(concatted_row, expected_index) + + concatted_0 = concat([df1, df2], axis=0) + tm.assert_frame_equal(concatted_0, expected_index) + + # Columns/1 DataFrame + expected_columns = DataFrame( + [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=["A", "A"] + ) + + concatted_columns = concat([df1, df2], axis="columns") + tm.assert_frame_equal(concatted_columns, expected_columns) + + concatted_1 = concat([df1, df2], axis=1) + tm.assert_frame_equal(concatted_1, expected_columns) + + series1 = Series([0.1, 0.2]) + series2 = Series([0.3, 0.4]) + + # Index/row/0 Series + expected_index_series = Series([0.1, 0.2, 0.3, 0.4], index=[0, 1, 0, 1]) + + concatted_index_series = concat([series1, series2], axis="index") + tm.assert_series_equal(concatted_index_series, expected_index_series) + + concatted_row_series = concat([series1, series2], axis="rows") + tm.assert_series_equal(concatted_row_series, expected_index_series) + + concatted_0_series = concat([series1, series2], axis=0) + tm.assert_series_equal(concatted_0_series, expected_index_series) + + # Columns/1 Series + expected_columns_series = DataFrame( + [[0.1, 0.3], [0.2, 0.4]], index=[0, 1], columns=[0, 1] + ) + + concatted_columns_series = concat([series1, series2], axis="columns") + tm.assert_frame_equal(concatted_columns_series, expected_columns_series) + + concatted_1_series = concat([series1, series2], axis=1) + tm.assert_frame_equal(concatted_1_series, expected_columns_series) + + # Testing ValueError + with pytest.raises(ValueError, match="No axis named"): + concat([series1, series2], axis="something") + + def test_concat_numerical_names(self): + # GH#15262, GH#12223 + df = DataFrame( + {"col": range(9)}, + dtype="int32", + index=( + pd.MultiIndex.from_product( + [["A0", "A1", "A2"], ["B0", "B1", "B2"]], names=[1, 2] + ) + ), + ) + result = concat((df.iloc[:2, :], df.iloc[-2:, :])) + expected = DataFrame( + {"col": [0, 1, 7, 8]}, + dtype="int32", + index=pd.MultiIndex.from_tuples( + [("A0", "B0"), ("A0", "B1"), ("A2", "B1"), ("A2", "B2")], names=[1, 2] + ), + ) + tm.assert_frame_equal(result, expected) + + def test_concat_astype_dup_col(self): + # GH#23049 + df = DataFrame([{"a": "b"}]) + df = concat([df, df], axis=1) + + result = df.astype("category") + expected = DataFrame( + np.array(["b", "b"]).reshape(1, 2), columns=["a", "a"] + ).astype("category") + tm.assert_frame_equal(result, expected) + + def test_concat_dataframe_keys_bug(self, sort): + t1 = DataFrame( + {"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))} + ) + t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))}) + + # it works + result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort) + assert list(result.columns) == [("t1", "value"), ("t2", "value")] + + def test_concat_bool_with_int(self): + # GH#42092 we may want to change this to return object, but that + # would need a deprecation + df1 = DataFrame(Series([True, False, True, True], dtype="bool")) + df2 = DataFrame(Series([1, 0, 1], dtype="int64")) + + result = concat([df1, df2]) + expected = concat([df1.astype("int64"), df2]) + tm.assert_frame_equal(result, expected) + + def test_concat_duplicates_in_index_with_keys(self): + # GH#42651 + index = [1, 1, 3] + data = [1, 2, 3] + + df = DataFrame(data=data, index=index) + result = concat([df], keys=["A"], 
names=["ID", "date"]) + mi = pd.MultiIndex.from_product([["A"], index], names=["ID", "date"]) + expected = DataFrame(data=data, index=mi) + tm.assert_frame_equal(result, expected) + tm.assert_index_equal(result.index.levels[1], Index([1, 3], name="date")) + + @pytest.mark.parametrize("ignore_index", [True, False]) + @pytest.mark.parametrize("order", ["C", "F"]) + @pytest.mark.parametrize("axis", [0, 1]) + def test_concat_copies(self, axis, order, ignore_index, using_copy_on_write): + # based on asv ConcatDataFrames + df = DataFrame(np.zeros((10, 5), dtype=np.float32, order=order)) + + res = concat([df] * 5, axis=axis, ignore_index=ignore_index, copy=True) + + if not using_copy_on_write: + for arr in res._iter_column_arrays(): + for arr2 in df._iter_column_arrays(): + assert not np.shares_memory(arr, arr2) + + def test_outer_sort_columns(self): + # GH#47127 + df1 = DataFrame({"A": [0], "B": [1], 0: 1}) + df2 = DataFrame({"A": [100]}) + result = concat([df1, df2], ignore_index=True, join="outer", sort=True) + expected = DataFrame({0: [1.0, np.nan], "A": [0, 100], "B": [1.0, np.nan]}) + tm.assert_frame_equal(result, expected) + + def test_inner_sort_columns(self): + # GH#47127 + df1 = DataFrame({"A": [0], "B": [1], 0: 1}) + df2 = DataFrame({"A": [100], 0: 2}) + result = concat([df1, df2], ignore_index=True, join="inner", sort=True) + expected = DataFrame({0: [1, 2], "A": [0, 100]}) + tm.assert_frame_equal(result, expected) + + def test_sort_columns_one_df(self): + # GH#47127 + df1 = DataFrame({"A": [100], 0: 2}) + result = concat([df1], ignore_index=True, join="inner", sort=True) + expected = DataFrame({0: [2], "A": [100]}) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py new file mode 100644 index 0000000000000000000000000000000000000000..4c94dc0d51f7e7de9fbd43150b1331282fbabb90 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_datetimes.py @@ -0,0 +1,606 @@ +import datetime as dt +from datetime import datetime + +import dateutil +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, + concat, + date_range, + to_timedelta, +) +import pandas._testing as tm + + +class TestDatetimeConcat: + def test_concat_datetime64_block(self): + rng = date_range("1/1/2000", periods=10) + + df = DataFrame({"time": rng}) + + result = concat([df, df]) + assert (result.iloc[:10]["time"] == rng).all() + assert (result.iloc[10:]["time"] == rng).all() + + def test_concat_datetime_datetime64_frame(self): + # GH#2624 + rows = [] + rows.append([datetime(2010, 1, 1), 1]) + rows.append([datetime(2010, 1, 2), "hi"]) + + df2_obj = DataFrame.from_records(rows, columns=["date", "test"]) + + ind = date_range(start="2000/1/1", freq="D", periods=10) + df1 = DataFrame({"date": ind, "test": range(10)}) + + # it works! 
+ concat([df1, df2_obj]) + + def test_concat_datetime_timezone(self): + # GH 18523 + idx1 = date_range("2011-01-01", periods=3, freq="h", tz="Europe/Paris") + idx2 = date_range(start=idx1[0], end=idx1[-1], freq="h") + df1 = DataFrame({"a": [1, 2, 3]}, index=idx1) + df2 = DataFrame({"b": [1, 2, 3]}, index=idx2) + result = concat([df1, df2], axis=1) + + exp_idx = DatetimeIndex( + [ + "2011-01-01 00:00:00+01:00", + "2011-01-01 01:00:00+01:00", + "2011-01-01 02:00:00+01:00", + ], + dtype="M8[ns, Europe/Paris]", + freq="h", + ) + expected = DataFrame( + [[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"] + ) + + tm.assert_frame_equal(result, expected) + + idx3 = date_range("2011-01-01", periods=3, freq="h", tz="Asia/Tokyo") + df3 = DataFrame({"b": [1, 2, 3]}, index=idx3) + result = concat([df1, df3], axis=1) + + exp_idx = DatetimeIndex( + [ + "2010-12-31 15:00:00+00:00", + "2010-12-31 16:00:00+00:00", + "2010-12-31 17:00:00+00:00", + "2010-12-31 23:00:00+00:00", + "2011-01-01 00:00:00+00:00", + "2011-01-01 01:00:00+00:00", + ] + ).as_unit("ns") + + expected = DataFrame( + [ + [np.nan, 1], + [np.nan, 2], + [np.nan, 3], + [1, np.nan], + [2, np.nan], + [3, np.nan], + ], + index=exp_idx, + columns=["a", "b"], + ) + + tm.assert_frame_equal(result, expected) + + # GH 13783: Concat after resample + result = concat([df1.resample("h").mean(), df2.resample("h").mean()], sort=True) + expected = DataFrame( + {"a": [1, 2, 3] + [np.nan] * 3, "b": [np.nan] * 3 + [1, 2, 3]}, + index=idx1.append(idx1), + ) + tm.assert_frame_equal(result, expected) + + def test_concat_datetimeindex_freq(self): + # GH 3232 + # Monotonic index result + dr = date_range("01-Jan-2013", periods=100, freq="50ms", tz="UTC") + data = list(range(100)) + expected = DataFrame(data, index=dr) + result = concat([expected[:50], expected[50:]]) + tm.assert_frame_equal(result, expected) + + # Non-monotonic index result + result = concat([expected[50:], expected[:50]]) + expected = DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50])) + expected.index._data.freq = None + tm.assert_frame_equal(result, expected) + + def test_concat_multiindex_datetime_object_index(self): + # https://github.com/pandas-dev/pandas/issues/11058 + idx = Index( + [dt.date(2013, 1, 1), dt.date(2014, 1, 1), dt.date(2015, 1, 1)], + dtype="object", + ) + + s = Series( + ["a", "b"], + index=MultiIndex.from_arrays( + [ + [1, 2], + idx[:-1], + ], + names=["first", "second"], + ), + ) + s2 = Series( + ["a", "b"], + index=MultiIndex.from_arrays( + [[1, 2], idx[::2]], + names=["first", "second"], + ), + ) + mi = MultiIndex.from_arrays( + [[1, 2, 2], idx], + names=["first", "second"], + ) + assert mi.levels[1].dtype == object + + expected = DataFrame( + [["a", "a"], ["b", np.nan], [np.nan, "b"]], + index=mi, + ) + result = concat([s, s2], axis=1) + tm.assert_frame_equal(result, expected) + + def test_concat_NaT_series(self): + # GH 11693 + # test for merging NaT series with datetime series. 
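+ # Illustrative sketch, mirroring the expected values constructed below: + # concatenating a tz-aware series with an all-NaT series of the same + # tz-aware dtype keeps that dtype, e.g. + # concat([x, y], ignore_index=True).dtype -> datetime64[ns, US/Eastern]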
+ x = Series( + date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="US/Eastern") + ) + y = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]") + expected = Series([x[0], x[1], pd.NaT, pd.NaT]) + + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + # all NaT with tz + expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns, US/Eastern]") + result = concat([y, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + def test_concat_NaT_series2(self): + # without tz + x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h")) + y = Series(date_range("20151124 10:00", "20151124 11:00", freq="1h")) + y[:] = pd.NaT + expected = Series([x[0], x[1], pd.NaT, pd.NaT]) + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + # all NaT without tz + x[:] = pd.NaT + expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns]") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + def test_concat_NaT_dataframes(self, tz): + # GH 12396 + + dti = DatetimeIndex([pd.NaT, pd.NaT], tz=tz) + first = DataFrame({0: dti}) + second = DataFrame( + [[Timestamp("2015/01/01", tz=tz)], [Timestamp("2016/01/01", tz=tz)]], + index=[2, 3], + ) + expected = DataFrame( + [ + pd.NaT, + pd.NaT, + Timestamp("2015/01/01", tz=tz), + Timestamp("2016/01/01", tz=tz), + ] + ) + + result = concat([first, second], axis=0) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz1", [None, "UTC"]) + @pytest.mark.parametrize("tz2", [None, "UTC"]) + @pytest.mark.parametrize("item", [pd.NaT, Timestamp("20150101")]) + def test_concat_NaT_dataframes_all_NaT_axis_0( + self, tz1, tz2, item, using_array_manager + ): + # GH 12396 + + # tz-naive + first = DataFrame([[pd.NaT], [pd.NaT]]).apply(lambda x: x.dt.tz_localize(tz1)) + second = DataFrame([item]).apply(lambda x: x.dt.tz_localize(tz2)) + + result = concat([first, second], axis=0) + expected = DataFrame(Series([pd.NaT, pd.NaT, item], index=[0, 1, 0])) + expected = expected.apply(lambda x: x.dt.tz_localize(tz2)) + if tz1 != tz2: + expected = expected.astype(object) + if item is pd.NaT and not using_array_manager: + # GH#18463 + # TODO: setting nan here is to keep the test passing as we + # make assert_frame_equal stricter, but is nan really the + # ideal behavior here? 
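+ # Sketch of the assumption behind the nan-setting below: once the frame + # is object dtype, np.nan and pd.NaT are distinct missing-value sentinels, + # and a strict assert_frame_equal distinguishes them.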
+ if tz1 is not None: + expected.iloc[-1, 0] = np.nan + else: + expected.iloc[:-1, 0] = np.nan + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz1", [None, "UTC"]) + @pytest.mark.parametrize("tz2", [None, "UTC"]) + def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2): + # GH 12396 + + first = DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)) + second = DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1]) + expected = DataFrame( + { + 0: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1), + 1: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2), + } + ) + result = concat([first, second], axis=1) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz1", [None, "UTC"]) + @pytest.mark.parametrize("tz2", [None, "UTC"]) + def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2): + # GH 12396 + + # tz-naive + first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1) + second = DataFrame( + [ + [Timestamp("2015/01/01", tz=tz2)], + [Timestamp("2016/01/01", tz=tz2)], + ], + index=[2, 3], + ) + + expected = DataFrame( + [ + pd.NaT, + pd.NaT, + Timestamp("2015/01/01", tz=tz2), + Timestamp("2016/01/01", tz=tz2), + ] + ) + if tz1 != tz2: + expected = expected.astype(object) + + result = concat([first, second]) + tm.assert_frame_equal(result, expected) + + +class TestTimezoneConcat: + def test_concat_tz_series(self): + # gh-11755: tz and no tz + x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC")) + y = Series(date_range("2012-01-01", "2012-01-02")) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + def test_concat_tz_series2(self): + # gh-11887: concat tz and object + x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC")) + y = Series(["a", "b"]) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + def test_concat_tz_series3(self, unit, unit2): + # see gh-12217 and gh-12306 + # Concatenating two UTC times + first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]") + first[0] = first[0].dt.tz_localize("UTC") + + second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]") + second[0] = second[0].dt.tz_localize("UTC") + + result = concat([first, second]) + exp_unit = tm.get_finest_unit(unit, unit2) + assert result[0].dtype == f"datetime64[{exp_unit}, UTC]" + + def test_concat_tz_series4(self, unit, unit2): + # Concatenating two London times + first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]") + first[0] = first[0].dt.tz_localize("Europe/London") + + second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]") + second[0] = second[0].dt.tz_localize("Europe/London") + + result = concat([first, second]) + exp_unit = tm.get_finest_unit(unit, unit2) + assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]" + + def test_concat_tz_series5(self, unit, unit2): + # Concatenating 2+1 London times + first = DataFrame( + [[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]], dtype=f"M8[{unit}]" + ) + first[0] = first[0].dt.tz_localize("Europe/London") + + second = DataFrame([[datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]") + second[0] = second[0].dt.tz_localize("Europe/London") + + result = concat([first, second]) + exp_unit = tm.get_finest_unit(unit, unit2) + assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]" + + def test_concat_tz_series6(self, unit, unit2): 
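+ # Sketch of the unit rule exercised by series3-series6, assuming + # tm.get_finest_unit returns the higher-resolution unit: e.g. concatenating + # M8[s] with M8[ms] data upcasts to M8[ms], so a mixed (unit, unit2) pair + # resolves to the finer of the two.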
+ # Concatenating 1+2 London times + first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]") + first[0] = first[0].dt.tz_localize("Europe/London") + + second = DataFrame( + [[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]" + ) + second[0] = second[0].dt.tz_localize("Europe/London") + + result = concat([first, second]) + exp_unit = tm.get_finest_unit(unit, unit2) + assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]" + + def test_concat_tz_series_tzlocal(self): + # see gh-13583 + x = [ + Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()), + Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()), + ] + y = [ + Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()), + Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()), + ] + + result = concat([Series(x), Series(y)], ignore_index=True) + tm.assert_series_equal(result, Series(x + y)) + assert result.dtype == "datetime64[ns, tzlocal()]" + + def test_concat_tz_series_with_datetimelike(self): + # see gh-12620: tz and timedelta + x = [ + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-02-01", tz="US/Eastern"), + ] + y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")] + result = concat([Series(x), Series(y)], ignore_index=True) + tm.assert_series_equal(result, Series(x + y, dtype="object")) + + # tz and period + y = [pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M")] + result = concat([Series(x), Series(y)], ignore_index=True) + tm.assert_series_equal(result, Series(x + y, dtype="object")) + + def test_concat_tz_frame(self): + df2 = DataFrame( + { + "A": Timestamp("20130102", tz="US/Eastern"), + "B": Timestamp("20130603", tz="CET"), + }, + index=range(5), + ) + + # concat + df3 = concat([df2.A.to_frame(), df2.B.to_frame()], axis=1) + tm.assert_frame_equal(df2, df3) + + def test_concat_multiple_tzs(self): + # GH#12467 + # combining datetime tz-aware and naive DataFrames + ts1 = Timestamp("2015-01-01", tz=None) + ts2 = Timestamp("2015-01-01", tz="UTC") + ts3 = Timestamp("2015-01-01", tz="EST") + + df1 = DataFrame({"time": [ts1]}) + df2 = DataFrame({"time": [ts2]}) + df3 = DataFrame({"time": [ts3]}) + + results = concat([df1, df2]).reset_index(drop=True) + expected = DataFrame({"time": [ts1, ts2]}, dtype=object) + tm.assert_frame_equal(results, expected) + + results = concat([df1, df3]).reset_index(drop=True) + expected = DataFrame({"time": [ts1, ts3]}, dtype=object) + tm.assert_frame_equal(results, expected) + + results = concat([df2, df3]).reset_index(drop=True) + expected = DataFrame({"time": [ts2, ts3]}) + tm.assert_frame_equal(results, expected) + + def test_concat_multiindex_with_tz(self): + # GH 6606 + df = DataFrame( + { + "dt": DatetimeIndex( + [ + datetime(2014, 1, 1), + datetime(2014, 1, 2), + datetime(2014, 1, 3), + ], + dtype="M8[ns, US/Pacific]", + ), + "b": ["A", "B", "C"], + "c": [1, 2, 3], + "d": [4, 5, 6], + } + ) + df = df.set_index(["dt", "b"]) + + exp_idx1 = DatetimeIndex( + ["2014-01-01", "2014-01-02", "2014-01-03"] * 2, + dtype="M8[ns, US/Pacific]", + name="dt", + ) + exp_idx2 = Index(["A", "B", "C"] * 2, name="b") + exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2]) + expected = DataFrame( + {"c": [1, 2, 3] * 2, "d": [4, 5, 6] * 2}, index=exp_idx, columns=["c", "d"] + ) + + result = concat([df, df]) + tm.assert_frame_equal(result, expected) + + def test_concat_tz_not_aligned(self): + # GH#22796 + ts = pd.to_datetime([1, 2]).tz_localize("UTC") + a = DataFrame({"A": ts}) + b = DataFrame({"A": ts, "B": ts}) + result = concat([a, b], sort=True, ignore_index=True) + expected 
= DataFrame( + {"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)} + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "t1", + [ + "2015-01-01", + pytest.param( + pd.NaT, + marks=pytest.mark.xfail( + reason="GH23037 incorrect dtype when concatenating" + ), + ), + ], + ) + def test_concat_tz_NaT(self, t1): + # GH#22796 + # Concatenating tz-aware multicolumn DataFrames + ts1 = Timestamp(t1, tz="UTC") + ts2 = Timestamp("2015-01-01", tz="UTC") + ts3 = Timestamp("2015-01-01", tz="UTC") + + df1 = DataFrame([[ts1, ts2]]) + df2 = DataFrame([[ts3]]) + + result = concat([df1, df2]) + expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0]) + + tm.assert_frame_equal(result, expected) + + def test_concat_tz_with_empty(self): + # GH 9188 + result = concat( + [DataFrame(date_range("2000", periods=1, tz="UTC")), DataFrame()] + ) + expected = DataFrame(date_range("2000", periods=1, tz="UTC")) + tm.assert_frame_equal(result, expected) + + +class TestPeriodConcat: + def test_concat_period_series(self): + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="D")) + expected = Series([x[0], x[1], y[0], y[1]], dtype="Period[D]") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + + def test_concat_period_multiple_freq_series(self): + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="M")) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + assert result.dtype == "object" + + def test_concat_period_other_series(self): + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="M")) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + assert result.dtype == "object" + + def test_concat_period_other_series2(self): + # non-period + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(DatetimeIndex(["2015-11-01", "2015-12-01"])) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + assert result.dtype == "object" + + def test_concat_period_other_series3(self): + x = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D")) + y = Series(["A", "B"]) + expected = Series([x[0], x[1], y[0], y[1]], dtype="object") + result = concat([x, y], ignore_index=True) + tm.assert_series_equal(result, expected) + assert result.dtype == "object" + + +def test_concat_timedelta64_block(): + rng = to_timedelta(np.arange(10), unit="s") + + df = DataFrame({"time": rng}) + + result = concat([df, df]) + tm.assert_frame_equal(result.iloc[:10], df) + tm.assert_frame_equal(result.iloc[10:], df) + + +def test_concat_multiindex_datetime_nat(): + # GH#44900 + left = DataFrame({"a": 1}, index=MultiIndex.from_tuples([(1, pd.NaT)])) + right = DataFrame( + {"b": 2}, index=MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)]) + ) + result = concat([left, right], axis="columns") + expected = DataFrame( + {"a": [1.0, np.nan], "b": 2}, MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)]) + ) + tm.assert_frame_equal(result, expected) + + +def test_concat_float_datetime64(using_array_manager): + # GH#32934 + df_time = DataFrame({"A": 
pd.array(["2000"], dtype="datetime64[ns]")}) + df_float = DataFrame({"A": pd.array([1.0], dtype="float64")}) + + expected = DataFrame( + { + "A": [ + pd.array(["2000"], dtype="datetime64[ns]")[0], + pd.array([1.0], dtype="float64")[0], + ] + }, + index=[0, 0], + ) + result = concat([df_time, df_float]) + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"A": pd.array([], dtype="object")}) + result = concat([df_time.iloc[:0], df_float.iloc[:0]]) + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"A": pd.array([1.0], dtype="object")}) + result = concat([df_time.iloc[:0], df_float]) + tm.assert_frame_equal(result, expected) + + if not using_array_manager: + expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")}) + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([df_time, df_float.iloc[:0]]) + tm.assert_frame_equal(result, expected) + else: + expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")}).astype( + {"A": "object"} + ) + result = concat([df_time, df_float.iloc[:0]]) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py new file mode 100644 index 0000000000000000000000000000000000000000..30ef0a934157badba23ce2eb07de691498035254 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_empty.py @@ -0,0 +1,295 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + RangeIndex, + Series, + concat, + date_range, +) +import pandas._testing as tm + + +class TestEmptyConcat: + def test_handle_empty_objects(self, sort, using_infer_string): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), columns=list("abcd") + ) + + dfcopy = df[:5].copy() + dfcopy["foo"] = "bar" + empty = df[5:5] + + frames = [dfcopy, empty, empty, df[5:]] + concatted = concat(frames, axis=0, sort=sort) + + expected = df.reindex(columns=["a", "b", "c", "d", "foo"]) + expected["foo"] = expected["foo"].astype( + object if not using_infer_string else "string[pyarrow_numpy]" + ) + expected.loc[0:4, "foo"] = "bar" + + tm.assert_frame_equal(concatted, expected) + + # empty as first element with time series + # GH3259 + df = DataFrame( + {"A": range(10000)}, index=date_range("20130101", periods=10000, freq="s") + ) + empty = DataFrame() + result = concat([df, empty], axis=1) + tm.assert_frame_equal(result, df) + result = concat([empty, df], axis=1) + tm.assert_frame_equal(result, df) + + result = concat([df, empty]) + tm.assert_frame_equal(result, df) + result = concat([empty, df]) + tm.assert_frame_equal(result, df) + + def test_concat_empty_series(self): + # GH 11082 + s1 = Series([1, 2, 3], name="x") + s2 = Series(name="y", dtype="float64") + res = concat([s1, s2], axis=1) + exp = DataFrame( + {"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]}, + index=RangeIndex(3), + ) + tm.assert_frame_equal(res, exp) + + s1 = Series([1, 2, 3], name="x") + s2 = Series(name="y", dtype="float64") + msg = "The behavior of array concatenation with empty entries is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = concat([s1, s2], axis=0) + # name will be reset + exp = Series([1, 2, 3]) + tm.assert_series_equal(res, exp) + + # empty Series with no name + s1 = Series([1, 2, 3], name="x") + s2 = 
Series(name=None, dtype="float64") + res = concat([s1, s2], axis=1) + exp = DataFrame( + {"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]}, + columns=["x", 0], + index=RangeIndex(3), + ) + tm.assert_frame_equal(res, exp) + + @pytest.mark.parametrize("tz", [None, "UTC"]) + @pytest.mark.parametrize("values", [[], [1, 2, 3]]) + def test_concat_empty_series_timelike(self, tz, values): + # GH 18447 + + first = Series([], dtype="M8[ns]").dt.tz_localize(tz) + dtype = None if values else np.float64 + second = Series(values, dtype=dtype) + + expected = DataFrame( + { + 0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz), + 1: values, + } + ) + result = concat([first, second], axis=1) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "left,right,expected", + [ + # booleans + (np.bool_, np.int32, np.object_), # changed from int32 in 2.0 GH#39817 + (np.bool_, np.float32, np.object_), + # datetime-like + ("m8[ns]", np.bool_, np.object_), + ("m8[ns]", np.int64, np.object_), + ("M8[ns]", np.bool_, np.object_), + ("M8[ns]", np.int64, np.object_), + # categorical + ("category", "category", "category"), + ("category", "object", "object"), + ], + ) + def test_concat_empty_series_dtypes(self, left, right, expected): + # GH#39817, GH#45101 + result = concat([Series(dtype=left), Series(dtype=right)]) + assert result.dtype == expected + + @pytest.mark.parametrize( + "dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"] + ) + def test_concat_empty_series_dtypes_match_roundtrips(self, dtype): + dtype = np.dtype(dtype) + + result = concat([Series(dtype=dtype)]) + assert result.dtype == dtype + + result = concat([Series(dtype=dtype), Series(dtype=dtype)]) + assert result.dtype == dtype + + @pytest.mark.parametrize("dtype", ["float64", "int8", "uint8", "m8[ns]", "M8[ns]"]) + @pytest.mark.parametrize( + "dtype2", + ["float64", "int8", "uint8", "m8[ns]", "M8[ns]"], + ) + def test_concat_empty_series_dtypes_roundtrips(self, dtype, dtype2): + # round-tripping with self & like self + if dtype == dtype2: + pytest.skip("same dtype is not applicable for test") + + def int_result_type(dtype, dtype2): + typs = {dtype.kind, dtype2.kind} + if not len(typs - {"i", "u", "b"}) and ( + dtype.kind == "i" or dtype2.kind == "i" + ): + return "i" + elif not len(typs - {"u", "b"}) and ( + dtype.kind == "u" or dtype2.kind == "u" + ): + return "u" + return None + + def float_result_type(dtype, dtype2): + typs = {dtype.kind, dtype2.kind} + if not len(typs - {"f", "i", "u"}) and ( + dtype.kind == "f" or dtype2.kind == "f" + ): + return "f" + return None + + def get_result_type(dtype, dtype2): + result = float_result_type(dtype, dtype2) + if result is not None: + return result + result = int_result_type(dtype, dtype2) + if result is not None: + return result + return "O" + + dtype = np.dtype(dtype) + dtype2 = np.dtype(dtype2) + expected = get_result_type(dtype, dtype2) + result = concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype + assert result.kind == expected + + def test_concat_empty_series_dtypes_triple(self): + assert ( + concat( + [Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)] + ).dtype + == np.object_ + ) + + def test_concat_empty_series_dtype_category_with_array(self): + # GH#18515 + assert ( + concat( + [Series(np.array([]), dtype="category"), Series(dtype="float64")] + ).dtype + == "float64" + ) + + def test_concat_empty_series_dtypes_sparse(self): + result = concat( + [ + Series(dtype="float64").astype("Sparse"), + 
Series(dtype="float64").astype("Sparse"), + ] + ) + assert result.dtype == "Sparse[float64]" + + result = concat( + [Series(dtype="float64").astype("Sparse"), Series(dtype="float64")] + ) + expected = pd.SparseDtype(np.float64) + assert result.dtype == expected + + result = concat( + [Series(dtype="float64").astype("Sparse"), Series(dtype="object")] + ) + expected = pd.SparseDtype("object") + assert result.dtype == expected + + def test_concat_empty_df_object_dtype(self): + # GH 9149 + df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]}) + df_2 = DataFrame(columns=df_1.columns) + result = concat([df_1, df_2], axis=0) + expected = df_1.astype(object) + tm.assert_frame_equal(result, expected) + + def test_concat_empty_dataframe_dtypes(self): + df = DataFrame(columns=list("abc")) + df["a"] = df["a"].astype(np.bool_) + df["b"] = df["b"].astype(np.int32) + df["c"] = df["c"].astype(np.float64) + + result = concat([df, df]) + assert result["a"].dtype == np.bool_ + assert result["b"].dtype == np.int32 + assert result["c"].dtype == np.float64 + + result = concat([df, df.astype(np.float64)]) + assert result["a"].dtype == np.object_ + assert result["b"].dtype == np.float64 + assert result["c"].dtype == np.float64 + + def test_concat_inner_join_empty(self): + # GH 15328 + df_empty = DataFrame() + df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64") + df_expected = DataFrame({"a": []}, index=RangeIndex(0), dtype="int64") + + result = concat([df_a, df_empty], axis=1, join="inner") + tm.assert_frame_equal(result, df_expected) + + result = concat([df_a, df_empty], axis=1, join="outer") + tm.assert_frame_equal(result, df_a) + + def test_empty_dtype_coerce(self): + # xref to #12411 + # xref to #12045 + # xref to #11594 + # see below + + # 10571 + df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"]) + df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"]) + result = concat([df1, df2]) + expected = df1.dtypes + tm.assert_series_equal(result.dtypes, expected) + + def test_concat_empty_dataframe(self): + # 39037 + df1 = DataFrame(columns=["a", "b"]) + df2 = DataFrame(columns=["b", "c"]) + result = concat([df1, df2, df1]) + expected = DataFrame(columns=["a", "b", "c"]) + tm.assert_frame_equal(result, expected) + + df3 = DataFrame(columns=["a", "b"]) + df4 = DataFrame(columns=["b"]) + result = concat([df3, df4]) + expected = DataFrame(columns=["a", "b"]) + tm.assert_frame_equal(result, expected) + + def test_concat_empty_dataframe_different_dtypes(self, using_infer_string): + # 39037 + df1 = DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) + df2 = DataFrame({"a": [1, 2, 3]}) + + result = concat([df1[:0], df2[:0]]) + assert result["a"].dtype == np.int64 + assert result["b"].dtype == np.object_ if not using_infer_string else "string" + + def test_concat_to_empty_ea(self): + """48510 `concat` to an empty EA should maintain type EA dtype.""" + df_empty = DataFrame({"a": pd.array([], dtype=pd.Int64Dtype())}) + df_new = DataFrame({"a": pd.array([1, 2, 3], dtype=pd.Int64Dtype())}) + expected = df_new.copy() + result = concat([df_empty, df_new]) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py new file mode 100644 index 0000000000000000000000000000000000000000..52bb9fa0f151bb802d0cb440982230bd99c65b76 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_index.py @@ -0,0 
+1,472 @@ +from copy import deepcopy + +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + concat, +) +import pandas._testing as tm + + +class TestIndexConcat: + def test_concat_ignore_index(self, sort): + frame1 = DataFrame( + {"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]} + ) + frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]}) + frame1.index = Index(["x", "y", "z"]) + frame2.index = Index(["x", "y", "q"]) + + v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort) + + nan = np.nan + expected = DataFrame( + [ + [nan, nan, nan, 4.3], + ["a", 1, 4.5, 5.2], + ["b", 2, 3.2, 2.2], + ["c", 3, 1.2, nan], + ], + index=Index(["q", "x", "y", "z"]), + ) + if not sort: + expected = expected.loc[["x", "y", "z", "q"]] + + tm.assert_frame_equal(v1, expected) + + @pytest.mark.parametrize( + "name_in1,name_in2,name_in3,name_out", + [ + ("idx", "idx", "idx", "idx"), + ("idx", "idx", None, None), + ("idx", None, None, None), + ("idx1", "idx2", None, None), + ("idx1", "idx1", "idx2", None), + ("idx1", "idx2", "idx3", None), + (None, None, None, None), + ], + ) + def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out): + # GH13475 + indices = [ + Index(["a", "b", "c"], name=name_in1), + Index(["b", "c", "d"], name=name_in2), + Index(["c", "d", "e"], name=name_in3), + ] + frames = [ + DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"]) + ] + result = concat(frames, axis=1) + + exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out) + expected = DataFrame( + { + "x": [0, 1, 2, np.nan, np.nan], + "y": [np.nan, 0, 1, 2, np.nan], + "z": [np.nan, np.nan, 0, 1, 2], + }, + index=exp_ind, + ) + + tm.assert_frame_equal(result, expected) + + def test_concat_rename_index(self): + a = DataFrame( + np.random.default_rng(2).random((3, 3)), + columns=list("ABC"), + index=Index(list("abc"), name="index_a"), + ) + b = DataFrame( + np.random.default_rng(2).random((3, 3)), + columns=list("ABC"), + index=Index(list("abc"), name="index_b"), + ) + + result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"]) + + exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"]) + names = list(exp.index.names) + names[1] = "lvl1" + exp.index.set_names(names, inplace=True) + + tm.assert_frame_equal(result, exp) + assert result.index.names == exp.index.names + + def test_concat_copy_index_series(self, axis, using_copy_on_write): + # GH 29879 + ser = Series([1, 2]) + comb = concat([ser, ser], axis=axis, copy=True) + if not using_copy_on_write or axis in [0, "index"]: + assert comb.index is not ser.index + else: + assert comb.index is ser.index + + def test_concat_copy_index_frame(self, axis, using_copy_on_write): + # GH 29879 + df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) + comb = concat([df, df], axis=axis, copy=True) + if not using_copy_on_write: + assert not comb.index.is_(df.index) + assert not comb.columns.is_(df.columns) + elif axis in [0, "index"]: + assert not comb.index.is_(df.index) + assert comb.columns.is_(df.columns) + elif axis in [1, "columns"]: + assert comb.index.is_(df.index) + assert not comb.columns.is_(df.columns) + + def test_default_index(self): + # is_series and ignore_index + s1 = Series([1, 2, 3], name="x") + s2 = Series([4, 5, 6], name="y") + res = concat([s1, s2], axis=1, ignore_index=True) + assert isinstance(res.columns, pd.RangeIndex) + exp = DataFrame([[1, 4], [2, 5], [3, 6]]) + # use 
check_index_type=True to check that the result has a + # RangeIndex (default index) + tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) + + # is_series and all inputs have no names + s1 = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + res = concat([s1, s2], axis=1, ignore_index=False) + assert isinstance(res.columns, pd.RangeIndex) + exp = DataFrame([[1, 4], [2, 5], [3, 6]]) + exp.columns = pd.RangeIndex(2) + tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) + + # is_dataframe and ignore_index + df1 = DataFrame({"A": [1, 2], "B": [5, 6]}) + df2 = DataFrame({"A": [3, 4], "B": [7, 8]}) + + res = concat([df1, df2], axis=0, ignore_index=True) + exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"]) + tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) + + res = concat([df1, df2], axis=1, ignore_index=True) + exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]]) + tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True) + + def test_dups_index(self): + # GH 4771 + + # single dtypes + df = DataFrame( + np.random.default_rng(2).integers(0, 10, size=40).reshape(10, 4), + columns=["A", "A", "C", "C"], + ) + + result = concat([df, df], axis=1) + tm.assert_frame_equal(result.iloc[:, :4], df) + tm.assert_frame_equal(result.iloc[:, 4:], df) + + result = concat([df, df], axis=0) + tm.assert_frame_equal(result.iloc[:10], df) + tm.assert_frame_equal(result.iloc[10:], df) + + # multi dtypes + df = concat( + [ + DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "A", "B", "B"], + ), + DataFrame( + np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2), + columns=["A", "C"], + ), + ], + axis=1, + ) + + result = concat([df, df], axis=1) + tm.assert_frame_equal(result.iloc[:, :6], df) + tm.assert_frame_equal(result.iloc[:, 6:], df) + + result = concat([df, df], axis=0) + tm.assert_frame_equal(result.iloc[:10], df) + tm.assert_frame_equal(result.iloc[10:], df) + + # append + result = df.iloc[0:8, :]._append(df.iloc[8:]) + tm.assert_frame_equal(result, df) + + result = df.iloc[0:8, :]._append(df.iloc[8:9])._append(df.iloc[9:10]) + tm.assert_frame_equal(result, df) + + expected = concat([df, df], axis=0) + result = df._append(df) + tm.assert_frame_equal(result, expected) + + +class TestMultiIndexConcat: + def test_concat_multiindex_with_keys(self, multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + index = frame.index + result = concat([frame, frame], keys=[0, 1], names=["iteration"]) + + assert result.index.names == ("iteration",) + index.names + tm.assert_frame_equal(result.loc[0], frame) + tm.assert_frame_equal(result.loc[1], frame) + assert result.index.nlevels == 3 + + def test_concat_multiindex_with_none_in_index_names(self): + # GH 15787 + index = MultiIndex.from_product([[1], range(5)], names=["level1", None]) + df = DataFrame({"col": range(5)}, index=index, dtype=np.int32) + + result = concat([df, df], keys=[1, 2], names=["level2"]) + index = MultiIndex.from_product( + [[1, 2], [1], range(5)], names=["level2", "level1", None] + ) + expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32) + tm.assert_frame_equal(result, expected) + + result = concat([df, df[:2]], keys=[1, 2], names=["level2"]) + level2 = [1] * 5 + [2] * 2 + level1 = [1] * 7 + no_name = list(range(5)) + list(range(2)) + tuples = list(zip(level2, level1, no_name)) + index = MultiIndex.from_tuples(tuples, names=["level2", "level1", None]) + expected
= DataFrame({"col": no_name}, index=index, dtype=np.int32) + tm.assert_frame_equal(result, expected) + + def test_concat_multiindex_rangeindex(self): + # GH13542 + # when multi-index levels are RangeIndex objects + # there is a bug in concat with objects of len 1 + + df = DataFrame(np.random.default_rng(2).standard_normal((9, 2))) + df.index = MultiIndex( + levels=[pd.RangeIndex(3), pd.RangeIndex(3)], + codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)], + ) + + res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]]) + exp = df.iloc[[2, 3, 4, 5], :] + tm.assert_frame_equal(res, exp) + + def test_concat_multiindex_dfs_with_deepcopy(self): + # GH 9967 + example_multiindex1 = MultiIndex.from_product([["a"], ["b"]]) + example_dataframe1 = DataFrame([0], index=example_multiindex1) + + example_multiindex2 = MultiIndex.from_product([["a"], ["c"]]) + example_dataframe2 = DataFrame([1], index=example_multiindex2) + + example_dict = {"s1": example_dataframe1, "s2": example_dataframe2} + expected_index = MultiIndex( + levels=[["s1", "s2"], ["a"], ["b", "c"]], + codes=[[0, 1], [0, 0], [0, 1]], + names=["testname", None, None], + ) + expected = DataFrame([[0], [1]], index=expected_index) + result_copy = concat(deepcopy(example_dict), names=["testname"]) + tm.assert_frame_equal(result_copy, expected) + result_no_copy = concat(example_dict, names=["testname"]) + tm.assert_frame_equal(result_no_copy, expected) + + @pytest.mark.parametrize( + "mi1_list", + [ + [["a"], range(2)], + [["b"], np.arange(2.0, 4.0)], + [["c"], ["A", "B"]], + [["d"], pd.date_range(start="2017", end="2018", periods=2)], + ], + ) + @pytest.mark.parametrize( + "mi2_list", + [ + [["a"], range(2)], + [["b"], np.arange(2.0, 4.0)], + [["c"], ["A", "B"]], + [["d"], pd.date_range(start="2017", end="2018", periods=2)], + ], + ) + def test_concat_with_various_multiindex_dtypes( + self, mi1_list: list, mi2_list: list + ): + # GitHub #23478 + mi1 = MultiIndex.from_product(mi1_list) + mi2 = MultiIndex.from_product(mi2_list) + + df1 = DataFrame(np.zeros((1, len(mi1))), columns=mi1) + df2 = DataFrame(np.zeros((1, len(mi2))), columns=mi2) + + if mi1_list[0] == mi2_list[0]: + expected_mi = MultiIndex( + levels=[mi1_list[0], list(mi1_list[1])], + codes=[[0, 0, 0, 0], [0, 1, 0, 1]], + ) + else: + expected_mi = MultiIndex( + levels=[ + mi1_list[0] + mi2_list[0], + list(mi1_list[1]) + list(mi2_list[1]), + ], + codes=[[0, 0, 1, 1], [0, 1, 2, 3]], + ) + + expected_df = DataFrame(np.zeros((1, len(expected_mi))), columns=expected_mi) + + with tm.assert_produces_warning(None): + result_df = concat((df1, df2), axis=1) + + tm.assert_frame_equal(expected_df, result_df) + + def test_concat_multiindex_(self): + # GitHub #44786 + df = DataFrame({"col": ["a", "b", "c"]}, index=["1", "2", "2"]) + df = concat([df], keys=["X"]) + + iterables = [["X"], ["1", "2", "2"]] + result_index = df.index + expected_index = MultiIndex.from_product(iterables) + + tm.assert_index_equal(result_index, expected_index) + + result_df = df + expected_df = DataFrame( + {"col": ["a", "b", "c"]}, index=MultiIndex.from_product(iterables) + ) + tm.assert_frame_equal(result_df, expected_df) + + def test_concat_with_key_not_unique(self): + # GitHub #46519 + df1 = DataFrame({"name": [1]}) + df2 = DataFrame({"name": [2]}) + df3 = DataFrame({"name": [3]}) + df_a = concat([df1, df2, df3], keys=["x", "y", "x"]) + # the warning is caused by indexing unsorted multi-index + with tm.assert_produces_warning( + PerformanceWarning, match="indexing past lexsort depth" + ): + out_a = df_a.loc[("x", 
0), :] + + df_b = DataFrame( + {"name": [1, 2, 3]}, index=Index([("x", 0), ("y", 0), ("x", 0)]) + ) + with tm.assert_produces_warning( + PerformanceWarning, match="indexing past lexsort depth" + ): + out_b = df_b.loc[("x", 0)] + + tm.assert_frame_equal(out_a, out_b) + + df1 = DataFrame({"name": ["a", "a", "b"]}) + df2 = DataFrame({"name": ["a", "b"]}) + df3 = DataFrame({"name": ["c", "d"]}) + df_a = concat([df1, df2, df3], keys=["x", "y", "x"]) + with tm.assert_produces_warning( + PerformanceWarning, match="indexing past lexsort depth" + ): + out_a = df_a.loc[("x", 0), :] + + df_b = DataFrame( + { + "a": ["x", "x", "x", "y", "y", "x", "x"], + "b": [0, 1, 2, 0, 1, 0, 1], + "name": list("aababcd"), + } + ).set_index(["a", "b"]) + df_b.index.names = [None, None] + with tm.assert_produces_warning( + PerformanceWarning, match="indexing past lexsort depth" + ): + out_b = df_b.loc[("x", 0), :] + + tm.assert_frame_equal(out_a, out_b) + + def test_concat_with_duplicated_levels(self): + # keyword levels should be unique + df1 = DataFrame({"A": [1]}, index=["x"]) + df2 = DataFrame({"A": [1]}, index=["y"]) + msg = r"Level values not unique: \['x', 'y', 'y'\]" + with pytest.raises(ValueError, match=msg): + concat([df1, df2], keys=["x", "y"], levels=[["x", "y", "y"]]) + + @pytest.mark.parametrize("levels", [[["x", "y"]], [["x", "y", "y"]]]) + def test_concat_with_levels_with_none_keys(self, levels): + df1 = DataFrame({"A": [1]}, index=["x"]) + df2 = DataFrame({"A": [1]}, index=["y"]) + msg = "levels supported only when keys is not None" + with pytest.raises(ValueError, match=msg): + concat([df1, df2], levels=levels) + + def test_concat_range_index_result(self): + # GH#47501 + df1 = DataFrame({"a": [1, 2]}) + df2 = DataFrame({"b": [1, 2]}) + + result = concat([df1, df2], sort=True, axis=1) + expected = DataFrame({"a": [1, 2], "b": [1, 2]}) + tm.assert_frame_equal(result, expected) + expected_index = pd.RangeIndex(0, 2) + tm.assert_index_equal(result.index, expected_index, exact=True) + + def test_concat_index_keep_dtype(self): + # GH#47329 + df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype="object")) + df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="object")) + result = concat([df1, df2], ignore_index=True, join="outer", sort=True) + expected = DataFrame( + [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="object") + ) + tm.assert_frame_equal(result, expected) + + def test_concat_index_keep_dtype_ea_numeric(self, any_numeric_ea_dtype): + # GH#47329 + df1 = DataFrame( + [[0, 1, 1]], columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype) + ) + df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype=any_numeric_ea_dtype)) + result = concat([df1, df2], ignore_index=True, join="outer", sort=True) + expected = DataFrame( + [[0, 1, 1.0], [0, 1, np.nan]], + columns=Index([1, 2, 3], dtype=any_numeric_ea_dtype), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["Int8", "Int16", "Int32"]) + def test_concat_index_find_common(self, dtype): + # GH#47329 + df1 = DataFrame([[0, 1, 1]], columns=Index([1, 2, 3], dtype=dtype)) + df2 = DataFrame([[0, 1]], columns=Index([1, 2], dtype="Int32")) + result = concat([df1, df2], ignore_index=True, join="outer", sort=True) + expected = DataFrame( + [[0, 1, 1.0], [0, 1, np.nan]], columns=Index([1, 2, 3], dtype="Int32") + ) + tm.assert_frame_equal(result, expected) + + def test_concat_axis_1_sort_false_rangeindex(self, using_infer_string): + # GH 46675 + s1 = Series(["a", "b", "c"]) + s2 = Series(["a", "b"]) + s3 = 
Series(["a", "b", "c", "d"]) + s4 = Series( + [], dtype=object if not using_infer_string else "string[pyarrow_numpy]" + ) + result = concat( + [s1, s2, s3, s4], sort=False, join="outer", ignore_index=False, axis=1 + ) + expected = DataFrame( + [ + ["a"] * 3 + [np.nan], + ["b"] * 3 + [np.nan], + ["c", np.nan] * 2, + [np.nan] * 2 + ["d"] + [np.nan], + ], + dtype=object if not using_infer_string else "string[pyarrow_numpy]", + ) + tm.assert_frame_equal( + result, expected, check_index_type=True, check_column_type=True + ) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py new file mode 100644 index 0000000000000000000000000000000000000000..b1c44fc0debc397659354a3fa26c9ab6d33b8b40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_invalid.py @@ -0,0 +1,54 @@ +from io import StringIO + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + concat, + read_csv, +) +import pandas._testing as tm + + +class TestInvalidConcat: + @pytest.mark.parametrize("obj", [1, {}, [1, 2], (1, 2)]) + def test_concat_invalid(self, obj): + # trying to concat a ndframe with a non-ndframe + df1 = DataFrame(range(2)) + msg = ( + f"cannot concatenate object of type '{type(obj)}'; " + "only Series and DataFrame objs are valid" + ) + with pytest.raises(TypeError, match=msg): + concat([df1, obj]) + + def test_concat_invalid_first_argument(self): + df1 = DataFrame(range(2)) + msg = ( + "first argument must be an iterable of pandas " + 'objects, you passed an object of type "DataFrame"' + ) + with pytest.raises(TypeError, match=msg): + concat(df1) + + def test_concat_generator_obj(self): + # generator ok though + concat(DataFrame(np.random.default_rng(2).random((5, 5))) for _ in range(3)) + + def test_concat_textreader_obj(self): + # text reader ok + # GH6583 + data = """index,A,B,C,D + foo,2,3,4,5 + bar,7,8,9,10 + baz,12,13,14,15 + qux,12,13,14,15 + foo2,12,13,14,15 + bar2,12,13,14,15 + """ + + with read_csv(StringIO(data), chunksize=1) as reader: + result = concat(reader, ignore_index=True) + expected = read_csv(StringIO(data)) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py new file mode 100644 index 0000000000000000000000000000000000000000..c12b835cb61e1545d3fc5a61f16f195166edd1fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_series.py @@ -0,0 +1,175 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + concat, + date_range, +) +import pandas._testing as tm + + +class TestSeriesConcat: + def test_concat_series(self): + ts = Series( + np.arange(20, dtype=np.float64), + index=date_range("2020-01-01", periods=20), + name="foo", + ) + ts.name = "foo" + + pieces = [ts[:5], ts[5:15], ts[15:]] + + result = concat(pieces) + tm.assert_series_equal(result, ts) + assert result.name == ts.name + + result = concat(pieces, keys=[0, 1, 2]) + expected = ts.copy() + + ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]")) + + exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))] + exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes) + expected.index = exp_index + tm.assert_series_equal(result, expected) + + def 
test_concat_empty_and_non_empty_series_regression(self): + # GH 18187 regression test + s1 = Series([1]) + s2 = Series([], dtype=object) + + expected = s1 + msg = "The behavior of array concatenation with empty entries is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = concat([s1, s2]) + tm.assert_series_equal(result, expected) + + def test_concat_series_axis1(self): + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + + pieces = [ts[:-2], ts[2:], ts[2:-2]] + + result = concat(pieces, axis=1) + expected = DataFrame(pieces).T + tm.assert_frame_equal(result, expected) + + result = concat(pieces, keys=["A", "B", "C"], axis=1) + expected = DataFrame(pieces, index=["A", "B", "C"]).T + tm.assert_frame_equal(result, expected) + + def test_concat_series_axis1_preserves_series_names(self): + # preserve series names, #2489 + s = Series(np.random.default_rng(2).standard_normal(5), name="A") + s2 = Series(np.random.default_rng(2).standard_normal(5), name="B") + + result = concat([s, s2], axis=1) + expected = DataFrame({"A": s, "B": s2}) + tm.assert_frame_equal(result, expected) + + s2.name = None + result = concat([s, s2], axis=1) + tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object")) + + def test_concat_series_axis1_with_reindex(self, sort): + # must reindex, #2603 + s = Series( + np.random.default_rng(2).standard_normal(3), index=["c", "a", "b"], name="A" + ) + s2 = Series( + np.random.default_rng(2).standard_normal(4), + index=["d", "a", "b", "c"], + name="B", + ) + result = concat([s, s2], axis=1, sort=sort) + expected = DataFrame({"A": s, "B": s2}, index=["c", "a", "b", "d"]) + if sort: + expected = expected.sort_index() + tm.assert_frame_equal(result, expected) + + def test_concat_series_axis1_names_applied(self): + # ensure names argument is not ignored on axis=1, #23490 + s = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"]) + expected = DataFrame( + [[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A") + ) + tm.assert_frame_equal(result, expected) + + result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"]) + expected = DataFrame( + [[1, 4], [2, 5], [3, 6]], + columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]), + ) + tm.assert_frame_equal(result, expected) + + def test_concat_series_axis1_same_names_ignore_index(self): + dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1] + s1 = Series( + np.random.default_rng(2).standard_normal(len(dates)), + index=dates, + name="value", + ) + s2 = Series( + np.random.default_rng(2).standard_normal(len(dates)), + index=dates, + name="value", + ) + + result = concat([s1, s2], axis=1, ignore_index=True) + expected = Index(range(2)) + + tm.assert_index_equal(result.columns, expected, exact=True) + + @pytest.mark.parametrize( + "s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))] + ) + def test_concat_series_name_npscalar_tuple(self, s1name, s2name): + # GH21015 + s1 = Series({"a": 1, "b": 2}, name=s1name) + s2 = Series({"c": 5, "d": 6}, name=s2name) + result = concat([s1, s2]) + expected = Series({"a": 1, "b": 2, "c": 5, "d": 6}) + tm.assert_series_equal(result, expected) + + def test_concat_series_partial_columns_names(self): + # GH10698 + named_series = Series([1, 2], name="foo") + unnamed_series1 = Series([1, 2]) + unnamed_series2 = Series([4, 5]) + + result = concat([named_series, unnamed_series1, unnamed_series2], axis=1) + expected 
= DataFrame( + {"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1] + ) + tm.assert_frame_equal(result, expected) + + result = concat( + [named_series, unnamed_series1, unnamed_series2], + axis=1, + keys=["red", "blue", "yellow"], + ) + expected = DataFrame( + {"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]}, + columns=["red", "blue", "yellow"], + ) + tm.assert_frame_equal(result, expected) + + result = concat( + [named_series, unnamed_series1, unnamed_series2], axis=1, ignore_index=True + ) + expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]}) + tm.assert_frame_equal(result, expected) + + def test_concat_series_length_one_reversed(self, frame_or_series): + # GH39401 + obj = frame_or_series([100]) + result = concat([obj.iloc[::-1]]) + tm.assert_equal(result, obj) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_sort.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_sort.py new file mode 100644 index 0000000000000000000000000000000000000000..2724f819588933cc307dd396fdcd024f04c38eaa --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/concat/test_sort.py @@ -0,0 +1,118 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm + + +class TestConcatSort: + def test_concat_sorts_columns(self, sort): + # GH-4588 + df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"]) + df2 = DataFrame({"a": [3, 4], "c": [5, 6]}) + + # for sort=True/None + expected = DataFrame( + {"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]}, + columns=["a", "b", "c"], + ) + + if sort is False: + expected = expected[["b", "a", "c"]] + + # default + with tm.assert_produces_warning(None): + result = pd.concat([df1, df2], ignore_index=True, sort=sort) + tm.assert_frame_equal(result, expected) + + def test_concat_sorts_index(self, sort): + df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"]) + df2 = DataFrame({"b": [1, 2]}, index=["a", "b"]) + + # For True/None + expected = DataFrame( + {"a": [2, 3, 1], "b": [1, 2, None]}, + index=["a", "b", "c"], + columns=["a", "b"], + ) + if sort is False: + expected = expected.loc[["c", "a", "b"]] + + # Warn and sort by default + with tm.assert_produces_warning(None): + result = pd.concat([df1, df2], axis=1, sort=sort) + tm.assert_frame_equal(result, expected) + + def test_concat_inner_sort(self, sort): + # https://github.com/pandas-dev/pandas/pull/20613 + df1 = DataFrame( + {"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"] + ) + df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4]) + + with tm.assert_produces_warning(None): + # unset sort should *not* warn for inner join + # since that never sorted + result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True) + + expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"]) + if sort is True: + expected = expected[["a", "b"]] + tm.assert_frame_equal(result, expected) + + def test_concat_aligned_sort(self): + # GH-4588 + df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"]) + result = pd.concat([df, df], sort=True, ignore_index=True) + expected = DataFrame( + {"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]}, + columns=["a", "b", "c"], + ) + tm.assert_frame_equal(result, expected) + + result = pd.concat( + [df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True + ) + expected = expected[["b", "c"]] + tm.assert_frame_equal(result, expected) + + def 
+ def test_concat_aligned_sort_does_not_raise(self): + # GH-4588 + # We catch TypeErrors from sorting internally and do not re-raise. + df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"]) + expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"]) + result = pd.concat([df, df], ignore_index=True, sort=True) + tm.assert_frame_equal(result, expected) + + def test_concat_frame_with_sort_false(self): + # GH 43375 + result = pd.concat( + [DataFrame({i: i}, index=[i]) for i in range(2, 0, -1)], sort=False + ) + expected = DataFrame([[2, np.nan], [np.nan, 1]], index=[2, 1], columns=[2, 1]) + + tm.assert_frame_equal(result, expected) + + # GH 37937 + df1 = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[1, 2, 3]) + df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}, index=[3, 1, 6]) + result = pd.concat([df2, df1], axis=1, sort=False) + expected = DataFrame( + [ + [7.0, 10.0, 3.0, 6.0], + [8.0, 11.0, 1.0, 4.0], + [9.0, 12.0, np.nan, np.nan], + [np.nan, np.nan, 2.0, 5.0], + ], + index=[3, 1, 6, 2], + columns=["c", "d", "a", "b"], + ) + tm.assert_frame_equal(result, expected) + + def test_concat_sort_none_raises(self): + # GH#41518 + df = DataFrame({1: [1, 2], "a": [3, 4]}) + msg = "The 'sort' keyword only accepts boolean values; None was passed." + with pytest.raises(ValueError, match=msg): + pd.concat([df, df], sort=None) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_join.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_join.py new file mode 100644 index 0000000000000000000000000000000000000000..db5a0437a14f0c67effe8351930fb5d39f4fa514 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_join.py @@ -0,0 +1,1101 @@ +import re + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Index, + MultiIndex, + Series, + Timestamp, + bdate_range, + concat, + merge, + option_context, +) +import pandas._testing as tm + + +def get_test_data(ngroups=8, n=50): + unique_groups = list(range(ngroups)) + arr = np.asarray(np.tile(unique_groups, n // ngroups)) + + if len(arr) < n: + arr = np.asarray(list(arr) + unique_groups[: n - len(arr)]) + + np.random.default_rng(2).shuffle(arr) + return arr + + +class TestJoin: + # aggregate multiple columns + @pytest.fixture + def df(self): + df = DataFrame( + { + "key1": get_test_data(), + "key2": get_test_data(), + "data1": np.random.default_rng(2).standard_normal(50), + "data2": np.random.default_rng(2).standard_normal(50), + } + ) + + # exclude a couple keys for fun + df = df[df["key2"] > 1] + return df + + @pytest.fixture + def df2(self): + return DataFrame( + { + "key1": get_test_data(n=10), + "key2": get_test_data(ngroups=4, n=10), + "value": np.random.default_rng(2).standard_normal(10), + } + ) + + @pytest.fixture + def target_source(self): + data = { + "A": [0.0, 1.0, 2.0, 3.0, 4.0], + "B": [0.0, 1.0, 0.0,
1.0, 0.0], + "C": ["foo1", "foo2", "foo3", "foo4", "foo5"], + "D": bdate_range("1/1/2009", periods=5), + } + target = DataFrame(data, index=Index(["a", "b", "c", "d", "e"], dtype=object)) + + # Join on string value + + source = DataFrame( + {"MergedA": data["A"], "MergedD": data["D"]}, index=data["C"] + ) + return target, source + + def test_left_outer_join(self, df, df2): + joined_key2 = merge(df, df2, on="key2") + _check_join(df, df2, joined_key2, ["key2"], how="left") + + joined_both = merge(df, df2) + _check_join(df, df2, joined_both, ["key1", "key2"], how="left") + + def test_right_outer_join(self, df, df2): + joined_key2 = merge(df, df2, on="key2", how="right") + _check_join(df, df2, joined_key2, ["key2"], how="right") + + joined_both = merge(df, df2, how="right") + _check_join(df, df2, joined_both, ["key1", "key2"], how="right") + + def test_full_outer_join(self, df, df2): + joined_key2 = merge(df, df2, on="key2", how="outer") + _check_join(df, df2, joined_key2, ["key2"], how="outer") + + joined_both = merge(df, df2, how="outer") + _check_join(df, df2, joined_both, ["key1", "key2"], how="outer") + + def test_inner_join(self, df, df2): + joined_key2 = merge(df, df2, on="key2", how="inner") + _check_join(df, df2, joined_key2, ["key2"], how="inner") + + joined_both = merge(df, df2, how="inner") + _check_join(df, df2, joined_both, ["key1", "key2"], how="inner") + + def test_handle_overlap(self, df, df2): + joined = merge(df, df2, on="key2", suffixes=(".foo", ".bar")) + + assert "key1.foo" in joined + assert "key1.bar" in joined + + def test_handle_overlap_arbitrary_key(self, df, df2): + joined = merge( + df, + df2, + left_on="key2", + right_on="key1", + suffixes=(".foo", ".bar"), + ) + assert "key1.foo" in joined + assert "key2.bar" in joined + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + def test_join_on(self, target_source, infer_string): + target, source = target_source + + merged = target.join(source, on="C") + tm.assert_series_equal(merged["MergedA"], target["A"], check_names=False) + tm.assert_series_equal(merged["MergedD"], target["D"], check_names=False) + + # join with duplicates (fix regression from DataFrame/Matrix merge) + df = DataFrame({"key": ["a", "a", "b", "b", "c"]}) + df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"]) + joined = df.join(df2, on="key") + expected = DataFrame( + {"key": ["a", "a", "b", "b", "c"], "value": [0, 0, 1, 1, 2]} + ) + tm.assert_frame_equal(joined, expected) + + # Test when some are missing + df_a = DataFrame([[1], [2], [3]], index=["a", "b", "c"], columns=["one"]) + df_b = DataFrame([["foo"], ["bar"]], index=[1, 2], columns=["two"]) + df_c = DataFrame([[1], [2]], index=[1, 2], columns=["three"]) + joined = df_a.join(df_b, on="one") + joined = joined.join(df_c, on="one") + assert np.isnan(joined["two"]["c"]) + assert np.isnan(joined["three"]["c"]) + + # merge column not present + with pytest.raises(KeyError, match="^'E'$"): + target.join(source, on="E") + + # overlap + source_copy = source.copy() + msg = ( + "You are trying to merge on float64 and object|string columns for key " + "'A'. 
If you wish to proceed you should use pd.concat" + ) + with pytest.raises(ValueError, match=msg): + target.join(source_copy, on="A") + + def test_join_on_fails_with_different_right_index(self): + df = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=3), + "b": np.random.default_rng(2).standard_normal(3), + } + ) + df2 = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=10), + "b": np.random.default_rng(2).standard_normal(10), + }, + index=MultiIndex.from_product([range(5), ["A", "B"]]), + ) + msg = r'len\(left_on\) must equal the number of levels in the index of "right"' + with pytest.raises(ValueError, match=msg): + merge(df, df2, left_on="a", right_index=True) + + def test_join_on_fails_with_different_left_index(self): + df = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=3), + "b": np.random.default_rng(2).standard_normal(3), + }, + index=MultiIndex.from_arrays([range(3), list("abc")]), + ) + df2 = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=10), + "b": np.random.default_rng(2).standard_normal(10), + } + ) + msg = r'len\(right_on\) must equal the number of levels in the index of "left"' + with pytest.raises(ValueError, match=msg): + merge(df, df2, right_on="b", left_index=True) + + def test_join_on_fails_with_different_column_counts(self): + df = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=3), + "b": np.random.default_rng(2).standard_normal(3), + } + ) + df2 = DataFrame( + { + "a": np.random.default_rng(2).choice(["m", "f"], size=10), + "b": np.random.default_rng(2).standard_normal(10), + }, + index=MultiIndex.from_product([range(5), ["A", "B"]]), + ) + msg = r"len\(right_on\) must equal len\(left_on\)" + with pytest.raises(ValueError, match=msg): + merge(df, df2, right_on="a", left_on=["a", "b"]) + + @pytest.mark.parametrize("wrong_type", [2, "str", None, np.array([0, 1])]) + def test_join_on_fails_with_wrong_object_type(self, wrong_type): + # GH12081 - original issue + + # GH21220 - merging of Series and DataFrame is now allowed + # Edited test to remove the Series object from test parameters + + df = DataFrame({"a": [1, 1]}) + msg = ( + "Can only merge Series or DataFrame objects, " + f"a {type(wrong_type)} was passed" + ) + with pytest.raises(TypeError, match=msg): + merge(wrong_type, df, left_on="a", right_on="a") + with pytest.raises(TypeError, match=msg): + merge(df, wrong_type, left_on="a", right_on="a") + + def test_join_on_pass_vector(self, target_source): + target, source = target_source + expected = target.join(source, on="C") + expected = expected.rename(columns={"C": "key_0"}) + expected = expected[["key_0", "A", "B", "D", "MergedA", "MergedD"]] + + join_col = target.pop("C") + result = target.join(source, on=join_col) + tm.assert_frame_equal(result, expected) + + def test_join_with_len0(self, target_source): + # nothing to merge + target, source = target_source + merged = target.join(source.reindex([]), on="C") + for col in source: + assert col in merged + assert merged[col].isna().all() + + merged2 = target.join(source.reindex([]), on="C", how="inner") + tm.assert_index_equal(merged2.columns, merged.columns) + assert len(merged2) == 0 + + def test_join_on_inner(self): + df = DataFrame({"key": ["a", "a", "d", "b", "b", "c"]}) + df2 = DataFrame({"value": [0, 1]}, index=["a", "b"]) + + joined = df.join(df2, on="key", how="inner") + + expected = df.join(df2, on="key") + expected = expected[expected["value"].notna()] + tm.assert_series_equal(joined["key"], 
expected["key"]) + tm.assert_series_equal(joined["value"], expected["value"], check_dtype=False) + tm.assert_index_equal(joined.index, expected.index) + + def test_join_on_singlekey_list(self): + df = DataFrame({"key": ["a", "a", "b", "b", "c"]}) + df2 = DataFrame({"value": [0, 1, 2]}, index=["a", "b", "c"]) + + # corner cases + joined = df.join(df2, on=["key"]) + expected = df.join(df2, on="key") + + tm.assert_frame_equal(joined, expected) + + def test_join_on_series(self, target_source): + target, source = target_source + result = target.join(source["MergedA"], on="C") + expected = target.join(source[["MergedA"]], on="C") + tm.assert_frame_equal(result, expected) + + def test_join_on_series_buglet(self): + # GH #638 + df = DataFrame({"a": [1, 1]}) + ds = Series([2], index=[1], name="b") + result = df.join(ds, on="a") + expected = DataFrame({"a": [1, 1], "b": [2, 2]}, index=df.index) + tm.assert_frame_equal(result, expected) + + def test_join_index_mixed(self, join_type): + # no overlapping blocks + df1 = DataFrame(index=np.arange(10)) + df1["bool"] = True + df1["string"] = "foo" + + df2 = DataFrame(index=np.arange(5, 15)) + df2["int"] = 1 + df2["float"] = 1.0 + + joined = df1.join(df2, how=join_type) + expected = _join_by_hand(df1, df2, how=join_type) + tm.assert_frame_equal(joined, expected) + + joined = df2.join(df1, how=join_type) + expected = _join_by_hand(df2, df1, how=join_type) + tm.assert_frame_equal(joined, expected) + + def test_join_index_mixed_overlap(self): + df1 = DataFrame( + {"A": 1.0, "B": 2, "C": "foo", "D": True}, + index=np.arange(10), + columns=["A", "B", "C", "D"], + ) + assert df1["B"].dtype == np.int64 + assert df1["D"].dtype == np.bool_ + + df2 = DataFrame( + {"A": 1.0, "B": 2, "C": "foo", "D": True}, + index=np.arange(0, 10, 2), + columns=["A", "B", "C", "D"], + ) + + # overlap + joined = df1.join(df2, lsuffix="_one", rsuffix="_two") + expected_columns = [ + "A_one", + "B_one", + "C_one", + "D_one", + "A_two", + "B_two", + "C_two", + "D_two", + ] + df1.columns = expected_columns[:4] + df2.columns = expected_columns[4:] + expected = _join_by_hand(df1, df2) + tm.assert_frame_equal(joined, expected) + + def test_join_empty_bug(self): + # generated an exception in 0.4.3 + x = DataFrame() + x.join(DataFrame([3], index=[0], columns=["A"]), how="outer") + + def test_join_unconsolidated(self): + # GH #331 + a = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), columns=["a", "b"] + ) + c = Series(np.random.default_rng(2).standard_normal(30)) + a["c"] = c + d = DataFrame(np.random.default_rng(2).standard_normal((30, 1)), columns=["q"]) + + # it works! 
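+ # An illustrative aside, assuming the default block-based backend: adding + # "c" after construction leaves ``a`` unconsolidated (split across several + # same-dtype blocks), so these joins are smoke tests that alignment is + # independent of block layout, e.g. a.join(d).shape == (30, 4).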
+ a.join(d) + d.join(a) + + def test_join_multiindex(self): + index1 = MultiIndex.from_arrays( + [["a", "a", "a", "b", "b", "b"], [1, 2, 3, 1, 2, 3]], + names=["first", "second"], + ) + + index2 = MultiIndex.from_arrays( + [["b", "b", "b", "c", "c", "c"], [1, 2, 3, 1, 2, 3]], + names=["first", "second"], + ) + + df1 = DataFrame( + data=np.random.default_rng(2).standard_normal(6), + index=index1, + columns=["var X"], + ) + df2 = DataFrame( + data=np.random.default_rng(2).standard_normal(6), + index=index2, + columns=["var Y"], + ) + + df1 = df1.sort_index(level=0) + df2 = df2.sort_index(level=0) + + joined = df1.join(df2, how="outer") + ex_index = Index(index1.values).union(Index(index2.values)) + expected = df1.reindex(ex_index).join(df2.reindex(ex_index)) + expected.index.names = index1.names + tm.assert_frame_equal(joined, expected) + assert joined.index.names == index1.names + + df1 = df1.sort_index(level=1) + df2 = df2.sort_index(level=1) + + joined = df1.join(df2, how="outer").sort_index(level=0) + ex_index = Index(index1.values).union(Index(index2.values)) + expected = df1.reindex(ex_index).join(df2.reindex(ex_index)) + expected.index.names = index1.names + + tm.assert_frame_equal(joined, expected) + assert joined.index.names == index1.names + + def test_join_inner_multiindex(self, lexsorted_two_level_string_multiindex): + key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"] + key2 = [ + "two", + "one", + "three", + "one", + "two", + "one", + "two", + "two", + "three", + "one", + ] + + data = np.random.default_rng(2).standard_normal(len(key1)) + data = DataFrame({"key1": key1, "key2": key2, "data": data}) + + index = lexsorted_two_level_string_multiindex + to_join = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), + index=index, + columns=["j_one", "j_two", "j_three"], + ) + + joined = data.join(to_join, on=["key1", "key2"], how="inner") + expected = merge( + data, + to_join.reset_index(), + left_on=["key1", "key2"], + right_on=["first", "second"], + how="inner", + sort=False, + ) + + expected2 = merge( + to_join, + data, + right_on=["key1", "key2"], + left_index=True, + how="inner", + sort=False, + ) + tm.assert_frame_equal(joined, expected2.reindex_like(joined)) + + expected2 = merge( + to_join, + data, + right_on=["key1", "key2"], + left_index=True, + how="inner", + sort=False, + ) + + expected = expected.drop(["first", "second"], axis=1) + expected.index = joined.index + + assert joined.index.is_monotonic_increasing + tm.assert_frame_equal(joined, expected) + + # _assert_same_contents(expected, expected2.loc[:, expected.columns]) + + def test_join_hierarchical_mixed_raises(self): + # GH 2024 + # GH 40993: For raising, enforced in 2.0 + df = DataFrame([(1, 2, 3), (4, 5, 6)], columns=["a", "b", "c"]) + new_df = df.groupby(["a"]).agg({"b": ["mean", "sum"]}) + other_df = DataFrame([(1, 2, 3), (7, 10, 6)], columns=["a", "b", "d"]) + other_df.set_index("a", inplace=True) + # GH 9455, 12219 + with pytest.raises( + pd.errors.MergeError, match="Not allowed to merge between different levels" + ): + merge(new_df, other_df, left_index=True, right_index=True) + + def test_join_float64_float32(self): + a = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + columns=["a", "b"], + dtype=np.float64, + ) + b = DataFrame( + np.random.default_rng(2).standard_normal((10, 1)), + columns=["c"], + dtype=np.float32, + ) + joined = a.join(b) + assert joined.dtypes["a"] == "float64" + assert joined.dtypes["b"] == "float64" + assert joined.dtypes["c"] 
== "float32" + + a = np.random.default_rng(2).integers(0, 5, 100).astype("int64") + b = np.random.default_rng(2).random(100).astype("float64") + c = np.random.default_rng(2).random(100).astype("float32") + df = DataFrame({"a": a, "b": b, "c": c}) + xpdf = DataFrame({"a": a, "b": b, "c": c}) + s = DataFrame( + np.random.default_rng(2).random(5).astype("float32"), columns=["md"] + ) + rs = df.merge(s, left_on="a", right_index=True) + assert rs.dtypes["a"] == "int64" + assert rs.dtypes["b"] == "float64" + assert rs.dtypes["c"] == "float32" + assert rs.dtypes["md"] == "float32" + + xp = xpdf.merge(s, left_on="a", right_index=True) + tm.assert_frame_equal(rs, xp) + + def test_join_many_non_unique_index(self): + df1 = DataFrame({"a": [1, 1], "b": [1, 1], "c": [10, 20]}) + df2 = DataFrame({"a": [1, 1], "b": [1, 2], "d": [100, 200]}) + df3 = DataFrame({"a": [1, 1], "b": [1, 2], "e": [1000, 2000]}) + idf1 = df1.set_index(["a", "b"]) + idf2 = df2.set_index(["a", "b"]) + idf3 = df3.set_index(["a", "b"]) + + result = idf1.join([idf2, idf3], how="outer") + + df_partially_merged = merge(df1, df2, on=["a", "b"], how="outer") + expected = merge(df_partially_merged, df3, on=["a", "b"], how="outer") + + result = result.reset_index() + expected = expected[result.columns] + expected["a"] = expected.a.astype("int64") + expected["b"] = expected.b.astype("int64") + tm.assert_frame_equal(result, expected) + + df1 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 1], "c": [10, 20, 30]}) + df2 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "d": [100, 200, 300]}) + df3 = DataFrame({"a": [1, 1, 1], "b": [1, 1, 2], "e": [1000, 2000, 3000]}) + idf1 = df1.set_index(["a", "b"]) + idf2 = df2.set_index(["a", "b"]) + idf3 = df3.set_index(["a", "b"]) + result = idf1.join([idf2, idf3], how="inner") + + df_partially_merged = merge(df1, df2, on=["a", "b"], how="inner") + expected = merge(df_partially_merged, df3, on=["a", "b"], how="inner") + + result = result.reset_index() + + tm.assert_frame_equal(result, expected.loc[:, result.columns]) + + # GH 11519 + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + s = Series( + np.repeat(np.arange(8), 2), index=np.repeat(np.arange(8), 2), name="TEST" + ) + inner = df.join(s, how="inner") + outer = df.join(s, how="outer") + left = df.join(s, how="left") + right = df.join(s, how="right") + tm.assert_frame_equal(inner, outer) + tm.assert_frame_equal(inner, left) + tm.assert_frame_equal(inner, right) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + def test_join_sort(self, infer_string): + with option_context("future.infer_string", infer_string): + left = DataFrame( + {"key": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]} + ) + right = DataFrame({"value2": ["a", "b", "c"]}, index=["bar", "baz", "foo"]) + + joined = left.join(right, on="key", sort=True) + expected = DataFrame( + { + "key": ["bar", "baz", "foo", "foo"], + "value": [2, 3, 1, 4], + "value2": ["a", "b", "c", "c"], + }, + index=[1, 2, 0, 3], + ) + tm.assert_frame_equal(joined, expected) + + # smoke test + joined = left.join(right, on="key", sort=False) + tm.assert_index_equal(joined.index, Index(range(4)), exact=True) + + def test_join_mixed_non_unique_index(self): + # GH 12814, unorderable types in py3 with a non-unique index + df1 = DataFrame({"a": [1, 2, 
3, 4]}, index=[1, 2, 3, "a"]) + df2 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 3, 3, 4]) + result = df1.join(df2) + expected = DataFrame( + {"a": [1, 2, 3, 3, 4], "b": [5, np.nan, 6, 7, np.nan]}, + index=[1, 2, 3, 3, "a"], + ) + tm.assert_frame_equal(result, expected) + + df3 = DataFrame({"a": [1, 2, 3, 4]}, index=[1, 2, 2, "a"]) + df4 = DataFrame({"b": [5, 6, 7, 8]}, index=[1, 2, 3, 4]) + result = df3.join(df4) + expected = DataFrame( + {"a": [1, 2, 3, 4], "b": [5, 6, 6, np.nan]}, index=[1, 2, 2, "a"] + ) + tm.assert_frame_equal(result, expected) + + def test_join_non_unique_period_index(self): + # GH #16871 + index = pd.period_range("2016-01-01", periods=16, freq="M") + df = DataFrame(list(range(len(index))), index=index, columns=["pnum"]) + df2 = concat([df, df]) + result = df.join(df2, how="inner", rsuffix="_df2") + expected = DataFrame( + np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2), + columns=["pnum", "pnum_df2"], + index=df2.sort_index().index, + ) + tm.assert_frame_equal(result, expected) + + def test_mixed_type_join_with_suffix(self): + # GH #916 + df = DataFrame( + np.random.default_rng(2).standard_normal((20, 6)), + columns=["a", "b", "c", "d", "e", "f"], + ) + df.insert(0, "id", 0) + df.insert(5, "dt", "foo") + + grouped = df.groupby("id") + msg = re.escape("agg function failed [how->mean,dtype->") + with pytest.raises(TypeError, match=msg): + grouped.mean() + mn = grouped.mean(numeric_only=True) + cn = grouped.count() + + # it works! + mn.join(cn, rsuffix="_right") + + def test_join_many(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 6)), columns=list("abcdef") + ) + df_list = [df[["a", "b"]], df[["c", "d"]], df[["e", "f"]]] + + joined = df_list[0].join(df_list[1:]) + tm.assert_frame_equal(joined, df) + + df_list = [df[["a", "b"]][:-2], df[["c", "d"]][2:], df[["e", "f"]][1:9]] + + def _check_diff_index(df_list, result, exp_index): + reindexed = [x.reindex(exp_index) for x in df_list] + expected = reindexed[0].join(reindexed[1:]) + tm.assert_frame_equal(result, expected) + + # different join types + joined = df_list[0].join(df_list[1:], how="outer") + _check_diff_index(df_list, joined, df.index) + + joined = df_list[0].join(df_list[1:]) + _check_diff_index(df_list, joined, df_list[0].index) + + joined = df_list[0].join(df_list[1:], how="inner") + _check_diff_index(df_list, joined, df.index[2:8]) + + msg = "Joining multiple DataFrames only supported for joining on index" + with pytest.raises(ValueError, match=msg): + df_list[0].join(df_list[1:], on="a") + + def test_join_many_mixed(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 4)), + columns=["A", "B", "C", "D"], + ) + df["key"] = ["foo", "bar"] * 4 + df1 = df.loc[:, ["A", "B"]] + df2 = df.loc[:, ["C", "D"]] + df3 = df.loc[:, ["key"]] + + result = df1.join([df2, df3]) + tm.assert_frame_equal(result, df) + + def test_join_dups(self): + # joining dups + df = concat( + [ + DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "A", "B", "B"], + ), + DataFrame( + np.random.default_rng(2).integers(0, 10, size=20).reshape(10, 2), + columns=["A", "C"], + ), + ], + axis=1, + ) + + expected = concat([df, df], axis=1) + result = df.join(df, rsuffix="_2") + result.columns = expected.columns + tm.assert_frame_equal(result, expected) + + # GH 4975, invalid join on dups + w = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"] + ) + x = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), columns=["x", 
"y"] + ) + y = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"] + ) + z = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), columns=["x", "y"] + ) + + dta = x.merge(y, left_index=True, right_index=True).merge( + z, left_index=True, right_index=True, how="outer" + ) + # GH 40991: As of 2.0 causes duplicate columns + with pytest.raises( + pd.errors.MergeError, + match="Passing 'suffixes' which cause duplicate columns", + ): + dta.merge(w, left_index=True, right_index=True) + + def test_join_multi_to_multi(self, join_type): + # GH 20475 + leftindex = MultiIndex.from_product( + [list("abc"), list("xy"), [1, 2]], names=["abc", "xy", "num"] + ) + left = DataFrame({"v1": range(12)}, index=leftindex) + + rightindex = MultiIndex.from_product( + [list("abc"), list("xy")], names=["abc", "xy"] + ) + right = DataFrame({"v2": [100 * i for i in range(1, 7)]}, index=rightindex) + + result = left.join(right, on=["abc", "xy"], how=join_type) + expected = ( + left.reset_index() + .merge(right.reset_index(), on=["abc", "xy"], how=join_type) + .set_index(["abc", "xy", "num"]) + ) + tm.assert_frame_equal(expected, result) + + msg = r'len\(left_on\) must equal the number of levels in the index of "right"' + with pytest.raises(ValueError, match=msg): + left.join(right, on="xy", how=join_type) + + with pytest.raises(ValueError, match=msg): + right.join(left, on=["abc", "xy"], how=join_type) + + def test_join_on_tz_aware_datetimeindex(self): + # GH 23931, 26335 + df1 = DataFrame( + { + "date": pd.date_range( + start="2018-01-01", periods=5, tz="America/Chicago" + ), + "vals": list("abcde"), + } + ) + + df2 = DataFrame( + { + "date": pd.date_range( + start="2018-01-03", periods=5, tz="America/Chicago" + ), + "vals_2": list("tuvwx"), + } + ) + result = df1.join(df2.set_index("date"), on="date") + expected = df1.copy() + expected["vals_2"] = Series([np.nan] * 2 + list("tuv")) + tm.assert_frame_equal(result, expected) + + def test_join_datetime_string(self): + # GH 5647 + dfa = DataFrame( + [ + ["2012-08-02", "L", 10], + ["2012-08-02", "J", 15], + ["2013-04-06", "L", 20], + ["2013-04-06", "J", 25], + ], + columns=["x", "y", "a"], + ) + dfa["x"] = pd.to_datetime(dfa["x"]).astype("M8[ns]") + dfb = DataFrame( + [["2012-08-02", "J", 1], ["2013-04-06", "L", 2]], + columns=["x", "y", "z"], + index=[2, 4], + ) + dfb["x"] = pd.to_datetime(dfb["x"]).astype("M8[ns]") + result = dfb.join(dfa.set_index(["x", "y"]), on=["x", "y"]) + expected = DataFrame( + [ + [Timestamp("2012-08-02 00:00:00"), "J", 1, 15], + [Timestamp("2013-04-06 00:00:00"), "L", 2, 20], + ], + index=[2, 4], + columns=["x", "y", "z", "a"], + ) + expected["x"] = expected["x"].astype("M8[ns]") + tm.assert_frame_equal(result, expected) + + def test_join_with_categorical_index(self): + # GH47812 + ix = ["a", "b"] + id1 = pd.CategoricalIndex(ix, categories=ix) + id2 = pd.CategoricalIndex(reversed(ix), categories=reversed(ix)) + + df1 = DataFrame({"c1": ix}, index=id1) + df2 = DataFrame({"c2": reversed(ix)}, index=id2) + result = df1.join(df2) + expected = DataFrame( + {"c1": ["a", "b"], "c2": ["a", "b"]}, + index=pd.CategoricalIndex(["a", "b"], categories=["a", "b"]), + ) + tm.assert_frame_equal(result, expected) + + +def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix="_y"): + # some smoke tests + for c in join_col: + assert result[c].notna().all() + + left_grouped = left.groupby(join_col) + right_grouped = right.groupby(join_col) + + for group_key, group in result.groupby(join_col): + 
l_joined = _restrict_to_columns(group, left.columns, lsuffix) + r_joined = _restrict_to_columns(group, right.columns, rsuffix) + + try: + lgroup = left_grouped.get_group(group_key) + except KeyError as err: + if how in ("left", "inner"): + raise AssertionError( + f"key {group_key} should not have been in the join" + ) from err + + _assert_all_na(l_joined, left.columns, join_col) + else: + _assert_same_contents(l_joined, lgroup) + + try: + rgroup = right_grouped.get_group(group_key) + except KeyError as err: + if how in ("right", "inner"): + raise AssertionError( + f"key {group_key} should not have been in the join" + ) from err + + _assert_all_na(r_joined, right.columns, join_col) + else: + _assert_same_contents(r_joined, rgroup) + + +def _restrict_to_columns(group, columns, suffix): + found = [ + c for c in group.columns if c in columns or c.replace(suffix, "") in columns + ] + + # filter + group = group.loc[:, found] + + # get rid of suffixes, if any + group = group.rename(columns=lambda x: x.replace(suffix, "")) + + # put in the right order... + group = group.loc[:, columns] + + return group + + +def _assert_same_contents(join_chunk, source): + NA_SENTINEL = -1234567 # drop_duplicates not so NA-friendly... + + jvalues = join_chunk.fillna(NA_SENTINEL).drop_duplicates().values + svalues = source.fillna(NA_SENTINEL).drop_duplicates().values + + rows = {tuple(row) for row in jvalues} + assert len(rows) == len(source) + assert all(tuple(row) in rows for row in svalues) + + +def _assert_all_na(join_chunk, source_columns, join_col): + for c in source_columns: + if c in join_col: + continue + assert join_chunk[c].isna().all() + + +def _join_by_hand(a, b, how="left"): + join_index = a.index.join(b.index, how=how) + + a_re = a.reindex(join_index) + b_re = b.reindex(join_index) + + result_columns = a.columns.append(b.columns) + + for col, s in b_re.items(): + a_re[col] = s + return a_re.reindex(columns=result_columns) + + +def test_join_inner_multiindex_deterministic_order(): + # GH: 36910 + left = DataFrame( + data={"e": 5}, + index=MultiIndex.from_tuples([(1, 2, 4)], names=("a", "b", "d")), + ) + right = DataFrame( + data={"f": 6}, index=MultiIndex.from_tuples([(2, 3)], names=("b", "c")) + ) + result = left.join(right, how="inner") + expected = DataFrame( + {"e": [5], "f": [6]}, + index=MultiIndex.from_tuples([(1, 2, 4, 3)], names=("a", "b", "d", "c")), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + ("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])] +) +def test_join_cross(input_col, output_cols): + # GH#5401 + left = DataFrame({"a": [1, 3]}) + right = DataFrame({input_col: [3, 4]}) + result = left.join(right, how="cross", lsuffix="_x", rsuffix="_y") + expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]}) + tm.assert_frame_equal(result, expected) + + +def test_join_multiindex_one_level(join_type): + # GH#36909 + left = DataFrame( + data={"c": 3}, index=MultiIndex.from_tuples([(1, 2)], names=("a", "b")) + ) + right = DataFrame(data={"d": 4}, index=MultiIndex.from_tuples([(2,)], names=("b",))) + result = left.join(right, how=join_type) + if join_type == "right": + expected = DataFrame( + {"c": [3], "d": [4]}, + index=MultiIndex.from_tuples([(2, 1)], names=["b", "a"]), + ) + else: + expected = DataFrame( + {"c": [3], "d": [4]}, + index=MultiIndex.from_tuples([(1, 2)], names=["a", "b"]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "categories, values", + [ + (["Y", "X"], ["Y", "X", 
"X"]), + ([2, 1], [2, 1, 1]), + ([2.5, 1.5], [2.5, 1.5, 1.5]), + ( + [Timestamp("2020-12-31"), Timestamp("2019-12-31")], + [Timestamp("2020-12-31"), Timestamp("2019-12-31"), Timestamp("2019-12-31")], + ), + ], +) +def test_join_multiindex_not_alphabetical_categorical(categories, values): + # GH#38502 + left = DataFrame( + { + "first": ["A", "A"], + "second": Categorical(categories, categories=categories), + "value": [1, 2], + } + ).set_index(["first", "second"]) + right = DataFrame( + { + "first": ["A", "A", "B"], + "second": Categorical(values, categories=categories), + "value": [3, 4, 5], + } + ).set_index(["first", "second"]) + result = left.join(right, lsuffix="_left", rsuffix="_right") + + expected = DataFrame( + { + "first": ["A", "A"], + "second": Categorical(categories, categories=categories), + "value_left": [1, 2], + "value_right": [3, 4], + } + ).set_index(["first", "second"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "left_empty, how, exp", + [ + (False, "left", "left"), + (False, "right", "empty"), + (False, "inner", "empty"), + (False, "outer", "left"), + (False, "cross", "empty"), + (True, "left", "empty"), + (True, "right", "right"), + (True, "inner", "empty"), + (True, "outer", "right"), + (True, "cross", "empty"), + ], +) +def test_join_empty(left_empty, how, exp): + left = DataFrame({"A": [2, 1], "B": [3, 4]}, dtype="int64").set_index("A") + right = DataFrame({"A": [1], "C": [5]}, dtype="int64").set_index("A") + + if left_empty: + left = left.head(0) + else: + right = right.head(0) + + result = left.join(right, how=how) + + if exp == "left": + expected = DataFrame({"A": [2, 1], "B": [3, 4], "C": [np.nan, np.nan]}) + expected = expected.set_index("A") + elif exp == "right": + expected = DataFrame({"B": [np.nan], "A": [1], "C": [5]}) + expected = expected.set_index("A") + elif exp == "empty": + expected = DataFrame(columns=["B", "C"], dtype="int64") + if how != "cross": + expected = expected.rename_axis("A") + if how == "outer": + expected = expected.sort_index() + + tm.assert_frame_equal(result, expected) + + +def test_join_empty_uncomparable_columns(): + # GH 57048 + df1 = DataFrame() + df2 = DataFrame(columns=["test"]) + df3 = DataFrame(columns=["foo", ("bar", "baz")]) + + result = df1 + df2 + expected = DataFrame(columns=["test"]) + tm.assert_frame_equal(result, expected) + + result = df2 + df3 + expected = DataFrame(columns=[("bar", "baz"), "foo", "test"]) + tm.assert_frame_equal(result, expected) + + result = df1 + df3 + expected = DataFrame(columns=[("bar", "baz"), "foo"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "how, values", + [ + ("inner", [0, 1, 2]), + ("outer", [0, 1, 2]), + ("left", [0, 1, 2]), + ("right", [0, 2, 1]), + ], +) +def test_join_multiindex_categorical_output_index_dtype(how, values): + # GH#50906 + df1 = DataFrame( + { + "a": Categorical([0, 1, 2]), + "b": Categorical([0, 1, 2]), + "c": [0, 1, 2], + } + ).set_index(["a", "b"]) + + df2 = DataFrame( + { + "a": Categorical([0, 2, 1]), + "b": Categorical([0, 2, 1]), + "d": [0, 2, 1], + } + ).set_index(["a", "b"]) + + expected = DataFrame( + { + "a": Categorical(values), + "b": Categorical(values), + "c": values, + "d": values, + } + ).set_index(["a", "b"]) + + result = df1.join(df2, how=how) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge.py new file mode 100644 index 
0000000000000000000000000000000000000000..ed49f3b758cc5cd826a804c8de611681a3e0343c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge.py @@ -0,0 +1,3020 @@ +from datetime import ( + date, + datetime, + timedelta, +) +import re + +import numpy as np +import pytest + +from pandas.core.dtypes.common import ( + is_object_dtype, + is_string_dtype, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, +) +import pandas._testing as tm +from pandas.core.reshape.concat import concat +from pandas.core.reshape.merge import ( + MergeError, + merge, +) + + +def get_test_data(ngroups=8, n=50): + unique_groups = list(range(ngroups)) + arr = np.asarray(np.tile(unique_groups, n // ngroups)) + + if len(arr) < n: + arr = np.asarray(list(arr) + unique_groups[: n - len(arr)]) + + np.random.default_rng(2).shuffle(arr) + return arr + + +def get_series(): + return [ + Series([1], dtype="int64"), + Series([1], dtype="Int64"), + Series([1.23]), + Series(["foo"]), + Series([True]), + Series([pd.Timestamp("2018-01-01")]), + Series([pd.Timestamp("2018-01-01", tz="US/Eastern")]), + ] + + +def get_series_na(): + return [ + Series([np.nan], dtype="Int64"), + Series([np.nan], dtype="float"), + Series([np.nan], dtype="object"), + Series([pd.NaT]), + ] + + +@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name) +def series_of_dtype(request): + """ + A parametrized fixture returning a variety of Series of different + dtypes + """ + return request.param + + +@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name) +def series_of_dtype2(request): + """ + A duplicate of the series_of_dtype fixture, so that it can be used + twice by a single function + """ + return request.param + + +@pytest.fixture(params=get_series_na(), ids=lambda x: x.dtype.name) +def series_of_dtype_all_na(request): + """ + A parametrized fixture returning a variety of Series with all NA + values + """ + return request.param + + +@pytest.fixture +def dfs_for_indicator(): + df1 = DataFrame({"col1": [0, 1], "col_conflict": [1, 2], "col_left": ["a", "b"]}) + df2 = DataFrame( + { + "col1": [1, 2, 3, 4, 5], + "col_conflict": [1, 2, 3, 4, 5], + "col_right": [2, 2, 2, 2, 2], + } + ) + return df1, df2 + + +class TestMerge: + @pytest.fixture + def df(self): + df = DataFrame( + { + "key1": get_test_data(), + "key2": get_test_data(), + "data1": np.random.default_rng(2).standard_normal(50), + "data2": np.random.default_rng(2).standard_normal(50), + } + ) + + # exclude a couple keys for fun + df = df[df["key2"] > 1] + return df + + @pytest.fixture + def df2(self): + return DataFrame( + { + "key1": get_test_data(n=10), + "key2": get_test_data(ngroups=4, n=10), + "value": np.random.default_rng(2).standard_normal(10), + } + ) + + @pytest.fixture + def left(self): + return DataFrame( + { + "key": ["a", "b", "c", "d", "e", "e", "a"], + "v1": np.random.default_rng(2).standard_normal(7), + } + ) + + @pytest.fixture + def right(self): + return DataFrame( + {"v2": np.random.default_rng(2).standard_normal(4)}, + index=["d", "b", "c", "a"], + ) + + def test_merge_inner_join_empty(self): + # GH 15328 + df_empty = DataFrame() + df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64") + result = merge(df_empty, df_a, left_index=True, right_index=True) + expected = DataFrame({"a": []}, dtype="int64") + 
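# zero rows survive the inner merge, but the non-empty side's int64 dtype + # is preserved in the empty result +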
tm.assert_frame_equal(result, expected) + + def test_merge_common(self, df, df2): + joined = merge(df, df2) + exp = merge(df, df2, on=["key1", "key2"]) + tm.assert_frame_equal(joined, exp) + + def test_merge_non_string_columns(self): + # https://github.com/pandas-dev/pandas/issues/17962 + # Checks that method runs for non string column names + left = DataFrame( + {0: [1, 0, 1, 0], 1: [0, 1, 0, 0], 2: [0, 0, 2, 0], 3: [1, 0, 0, 3]} + ) + + right = left.astype(float) + expected = left + result = merge(left, right) + tm.assert_frame_equal(expected, result) + + def test_merge_index_as_on_arg(self, df, df2): + # GH14355 + + left = df.set_index("key1") + right = df2.set_index("key1") + result = merge(left, right, on="key1") + expected = merge(df, df2, on="key1").set_index("key1") + tm.assert_frame_equal(result, expected) + + def test_merge_index_singlekey_right_vs_left(self): + left = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "e", "a"], + "v1": np.random.default_rng(2).standard_normal(7), + } + ) + right = DataFrame( + {"v2": np.random.default_rng(2).standard_normal(4)}, + index=["d", "b", "c", "a"], + ) + + merged1 = merge( + left, right, left_on="key", right_index=True, how="left", sort=False + ) + merged2 = merge( + right, left, right_on="key", left_index=True, how="right", sort=False + ) + tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns]) + + merged1 = merge( + left, right, left_on="key", right_index=True, how="left", sort=True + ) + merged2 = merge( + right, left, right_on="key", left_index=True, how="right", sort=True + ) + tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns]) + + def test_merge_index_singlekey_inner(self): + left = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "e", "a"], + "v1": np.random.default_rng(2).standard_normal(7), + } + ) + right = DataFrame( + {"v2": np.random.default_rng(2).standard_normal(4)}, + index=["d", "b", "c", "a"], + ) + + # inner join + result = merge(left, right, left_on="key", right_index=True, how="inner") + expected = left.join(right, on="key").loc[result.index] + tm.assert_frame_equal(result, expected) + + result = merge(right, left, right_on="key", left_index=True, how="inner") + expected = left.join(right, on="key").loc[result.index] + tm.assert_frame_equal(result, expected.loc[:, result.columns]) + + def test_merge_misspecified(self, df, df2, left, right): + msg = "Must pass right_on or right_index=True" + with pytest.raises(pd.errors.MergeError, match=msg): + merge(left, right, left_index=True) + msg = "Must pass left_on or left_index=True" + with pytest.raises(pd.errors.MergeError, match=msg): + merge(left, right, right_index=True) + + msg = ( + 'Can only pass argument "on" OR "left_on" and "right_on", not ' + "a combination of both" + ) + with pytest.raises(pd.errors.MergeError, match=msg): + merge(left, left, left_on="key", on="key") + + msg = r"len\(right_on\) must equal len\(left_on\)" + with pytest.raises(ValueError, match=msg): + merge(df, df2, left_on=["key1"], right_on=["key1", "key2"]) + + def test_index_and_on_parameters_confusion(self, df, df2): + msg = "right_index parameter must be of type bool, not " + with pytest.raises(ValueError, match=msg): + merge( + df, + df2, + how="left", + left_index=False, + right_index=["key1", "key2"], + ) + msg = "left_index parameter must be of type bool, not " + with pytest.raises(ValueError, match=msg): + merge( + df, + df2, + how="left", + left_index=["key1", "key2"], + right_index=False, + ) + with pytest.raises(ValueError, match=msg): + merge( + df, + df2, 
+ how="left", + left_index=["key1", "key2"], + right_index=["key1", "key2"], + ) + + def test_merge_overlap(self, left): + merged = merge(left, left, on="key") + exp_len = (left["key"].value_counts() ** 2).sum() + assert len(merged) == exp_len + assert "v1_x" in merged + assert "v1_y" in merged + + def test_merge_different_column_key_names(self): + left = DataFrame({"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]}) + right = DataFrame({"rkey": ["foo", "bar", "qux", "foo"], "value": [5, 6, 7, 8]}) + + merged = left.merge( + right, left_on="lkey", right_on="rkey", how="outer", sort=True + ) + + exp = Series(["bar", "baz", "foo", "foo", "foo", "foo", np.nan], name="lkey") + tm.assert_series_equal(merged["lkey"], exp) + + exp = Series(["bar", np.nan, "foo", "foo", "foo", "foo", "qux"], name="rkey") + tm.assert_series_equal(merged["rkey"], exp) + + exp = Series([2, 3, 1, 1, 4, 4, np.nan], name="value_x") + tm.assert_series_equal(merged["value_x"], exp) + + exp = Series([6, np.nan, 5, 8, 5, 8, 7], name="value_y") + tm.assert_series_equal(merged["value_y"], exp) + + def test_merge_copy(self): + left = DataFrame({"a": 0, "b": 1}, index=range(10)) + right = DataFrame({"c": "foo", "d": "bar"}, index=range(10)) + + merged = merge(left, right, left_index=True, right_index=True, copy=True) + + merged["a"] = 6 + assert (left["a"] == 0).all() + + merged["d"] = "peekaboo" + assert (right["d"] == "bar").all() + + def test_merge_nocopy(self, using_array_manager, using_infer_string): + left = DataFrame({"a": 0, "b": 1}, index=range(10)) + right = DataFrame({"c": "foo", "d": "bar"}, index=range(10)) + + merged = merge(left, right, left_index=True, right_index=True, copy=False) + + assert np.shares_memory(merged["a"]._values, left["a"]._values) + if not using_infer_string: + assert np.shares_memory(merged["d"]._values, right["d"]._values) + + def test_intelligently_handle_join_key(self): + # #733, be a bit more 1337 about not returning unconsolidated DataFrame + + left = DataFrame( + {"key": [1, 1, 2, 2, 3], "value": list(range(5))}, columns=["value", "key"] + ) + right = DataFrame({"key": [1, 1, 2, 3, 4, 5], "rvalue": list(range(6))}) + + joined = merge(left, right, on="key", how="outer") + expected = DataFrame( + { + "key": [1, 1, 1, 1, 2, 2, 3, 4, 5], + "value": np.array([0, 0, 1, 1, 2, 3, 4, np.nan, np.nan]), + "rvalue": [0, 1, 0, 1, 2, 2, 3, 4, 5], + }, + columns=["value", "key", "rvalue"], + ) + tm.assert_frame_equal(joined, expected) + + def test_merge_join_key_dtype_cast(self): + # #8596 + + df1 = DataFrame({"key": [1], "v1": [10]}) + df2 = DataFrame({"key": [2], "v1": [20]}) + df = merge(df1, df2, how="outer") + assert df["key"].dtype == "int64" + + df1 = DataFrame({"key": [True], "v1": [1]}) + df2 = DataFrame({"key": [False], "v1": [0]}) + df = merge(df1, df2, how="outer") + + # GH13169 + # GH#40073 + assert df["key"].dtype == "bool" + + df1 = DataFrame({"val": [1]}) + df2 = DataFrame({"val": [2]}) + lkey = np.array([1]) + rkey = np.array([2]) + df = merge(df1, df2, left_on=lkey, right_on=rkey, how="outer") + assert df["key_0"].dtype == np.dtype(int) + + def test_handle_join_key_pass_array(self): + left = DataFrame( + {"key": [1, 1, 2, 2, 3], "value": np.arange(5)}, + columns=["value", "key"], + dtype="int64", + ) + right = DataFrame({"rvalue": np.arange(6)}, dtype="int64") + key = np.array([1, 1, 2, 3, 4, 5], dtype="int64") + + merged = merge(left, right, left_on="key", right_on=key, how="outer") + merged2 = merge(right, left, left_on=key, right_on="key", how="outer") + + 
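# an ndarray passed via left_on/right_on is used directly as the join key; + # merging in either direction must therefore agree on the combined keys +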
tm.assert_series_equal(merged["key"], merged2["key"]) + assert merged["key"].notna().all() + assert merged2["key"].notna().all() + + left = DataFrame({"value": np.arange(5)}, columns=["value"]) + right = DataFrame({"rvalue": np.arange(6)}) + lkey = np.array([1, 1, 2, 2, 3]) + rkey = np.array([1, 1, 2, 3, 4, 5]) + + merged = merge(left, right, left_on=lkey, right_on=rkey, how="outer") + expected = Series([1, 1, 1, 1, 2, 2, 3, 4, 5], dtype=int, name="key_0") + tm.assert_series_equal(merged["key_0"], expected) + + left = DataFrame({"value": np.arange(3)}) + right = DataFrame({"rvalue": np.arange(6)}) + + key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64) + merged = merge(left, right, left_index=True, right_on=key, how="outer") + tm.assert_series_equal(merged["key_0"], Series(key, name="key_0")) + + def test_no_overlap_more_informative_error(self): + dt = datetime.now() + df1 = DataFrame({"x": ["a"]}, index=[dt]) + + df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt]) + + msg = ( + "No common columns to perform merge on. " + f"Merge options: left_on={None}, right_on={None}, " + f"left_index={False}, right_index={False}" + ) + + with pytest.raises(MergeError, match=msg): + merge(df1, df2) + + def test_merge_non_unique_indexes(self): + dt = datetime(2012, 5, 1) + dt2 = datetime(2012, 5, 2) + dt3 = datetime(2012, 5, 3) + dt4 = datetime(2012, 5, 4) + + df1 = DataFrame({"x": ["a"]}, index=[dt]) + df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt]) + _check_merge(df1, df2) + + # Not monotonic + df1 = DataFrame({"x": ["a", "b", "q"]}, index=[dt2, dt, dt4]) + df2 = DataFrame( + {"y": ["c", "d", "e", "f", "g", "h"]}, index=[dt3, dt3, dt2, dt2, dt, dt] + ) + _check_merge(df1, df2) + + df1 = DataFrame({"x": ["a", "b"]}, index=[dt, dt]) + df2 = DataFrame({"y": ["c", "d"]}, index=[dt, dt]) + _check_merge(df1, df2) + + def test_merge_non_unique_index_many_to_many(self): + dt = datetime(2012, 5, 1) + dt2 = datetime(2012, 5, 2) + dt3 = datetime(2012, 5, 3) + df1 = DataFrame({"x": ["a", "b", "c", "d"]}, index=[dt2, dt2, dt, dt]) + df2 = DataFrame( + {"y": ["e", "f", "g", " h", "i"]}, index=[dt2, dt2, dt3, dt, dt] + ) + _check_merge(df1, df2) + + def test_left_merge_empty_dataframe(self): + left = DataFrame({"key": [1], "value": [2]}) + right = DataFrame({"key": []}) + + result = merge(left, right, on="key", how="left") + tm.assert_frame_equal(result, left) + + result = merge(right, left, on="key", how="right") + tm.assert_frame_equal(result, left) + + @pytest.mark.parametrize("how", ["inner", "left", "right", "outer"]) + def test_merge_empty_dataframe(self, index, how): + # GH52777 + left = DataFrame([], index=index[:0]) + right = left.copy() + + result = left.join(right, how=how) + tm.assert_frame_equal(result, left) + + @pytest.mark.parametrize( + "kwarg", + [ + {"left_index": True, "right_index": True}, + {"left_index": True, "right_on": "x"}, + {"left_on": "a", "right_index": True}, + {"left_on": "a", "right_on": "x"}, + ], + ) + def test_merge_left_empty_right_empty(self, join_type, kwarg): + # GH 10824 + left = DataFrame(columns=["a", "b", "c"]) + right = DataFrame(columns=["x", "y", "z"]) + + exp_in = DataFrame(columns=["a", "b", "c", "x", "y", "z"], dtype=object) + + result = merge(left, right, how=join_type, **kwarg) + tm.assert_frame_equal(result, exp_in) + + def test_merge_left_empty_right_notempty(self): + # GH 10824 + left = DataFrame(columns=["a", "b", "c"]) + right = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["x", "y", "z"]) + + exp_out = DataFrame( + { + "a": np.array([np.nan] * 3, 
dtype=object), + "b": np.array([np.nan] * 3, dtype=object), + "c": np.array([np.nan] * 3, dtype=object), + "x": [1, 4, 7], + "y": [2, 5, 8], + "z": [3, 6, 9], + }, + columns=["a", "b", "c", "x", "y", "z"], + ) + exp_in = exp_out[0:0] # make empty DataFrame keeping dtype + + def check1(exp, kwarg): + result = merge(left, right, how="inner", **kwarg) + tm.assert_frame_equal(result, exp) + result = merge(left, right, how="left", **kwarg) + tm.assert_frame_equal(result, exp) + + def check2(exp, kwarg): + result = merge(left, right, how="right", **kwarg) + tm.assert_frame_equal(result, exp) + result = merge(left, right, how="outer", **kwarg) + tm.assert_frame_equal(result, exp) + + for kwarg in [ + {"left_index": True, "right_index": True}, + {"left_index": True, "right_on": "x"}, + ]: + check1(exp_in, kwarg) + check2(exp_out, kwarg) + + kwarg = {"left_on": "a", "right_index": True} + check1(exp_in, kwarg) + exp_out["a"] = [0, 1, 2] + check2(exp_out, kwarg) + + kwarg = {"left_on": "a", "right_on": "x"} + check1(exp_in, kwarg) + exp_out["a"] = np.array([np.nan] * 3, dtype=object) + check2(exp_out, kwarg) + + def test_merge_left_notempty_right_empty(self): + # GH 10824 + left = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]) + right = DataFrame(columns=["x", "y", "z"]) + + exp_out = DataFrame( + { + "a": [1, 4, 7], + "b": [2, 5, 8], + "c": [3, 6, 9], + "x": np.array([np.nan] * 3, dtype=object), + "y": np.array([np.nan] * 3, dtype=object), + "z": np.array([np.nan] * 3, dtype=object), + }, + columns=["a", "b", "c", "x", "y", "z"], + ) + exp_in = exp_out[0:0] # make empty DataFrame keeping dtype + # result will have object dtype + exp_in.index = exp_in.index.astype(object) + + def check1(exp, kwarg): + result = merge(left, right, how="inner", **kwarg) + tm.assert_frame_equal(result, exp) + result = merge(left, right, how="right", **kwarg) + tm.assert_frame_equal(result, exp) + + def check2(exp, kwarg): + result = merge(left, right, how="left", **kwarg) + tm.assert_frame_equal(result, exp) + result = merge(left, right, how="outer", **kwarg) + tm.assert_frame_equal(result, exp) + + # TODO: should the next loop be un-indented? 
doing so breaks this test + for kwarg in [ + {"left_index": True, "right_index": True}, + {"left_index": True, "right_on": "x"}, + {"left_on": "a", "right_index": True}, + {"left_on": "a", "right_on": "x"}, + ]: + check1(exp_in, kwarg) + check2(exp_out, kwarg) + + def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2): + # GH 25183 + df = DataFrame( + {"key": series_of_dtype, "value": series_of_dtype2}, + columns=["key", "value"], + ) + df_empty = df[:0] + expected = DataFrame( + { + "key": Series(dtype=df.dtypes["key"]), + "value_x": Series(dtype=df.dtypes["value"]), + "value_y": Series(dtype=df.dtypes["value"]), + }, + columns=["key", "value_x", "value_y"], + ) + actual = df_empty.merge(df, on="key") + tm.assert_frame_equal(actual, expected) + + def test_merge_all_na_column(self, series_of_dtype, series_of_dtype_all_na): + # GH 25183 + df_left = DataFrame( + {"key": series_of_dtype, "value": series_of_dtype_all_na}, + columns=["key", "value"], + ) + df_right = DataFrame( + {"key": series_of_dtype, "value": series_of_dtype_all_na}, + columns=["key", "value"], + ) + expected = DataFrame( + { + "key": series_of_dtype, + "value_x": series_of_dtype_all_na, + "value_y": series_of_dtype_all_na, + }, + columns=["key", "value_x", "value_y"], + ) + actual = df_left.merge(df_right, on="key") + tm.assert_frame_equal(actual, expected) + + def test_merge_nosort(self): + # GH#2098 + + d = { + "var1": np.random.default_rng(2).integers(0, 10, size=10), + "var2": np.random.default_rng(2).integers(0, 10, size=10), + "var3": [ + datetime(2012, 1, 12), + datetime(2011, 2, 4), + datetime(2010, 2, 3), + datetime(2012, 1, 12), + datetime(2011, 2, 4), + datetime(2012, 4, 3), + datetime(2012, 3, 4), + datetime(2008, 5, 1), + datetime(2010, 2, 3), + datetime(2012, 2, 3), + ], + } + df = DataFrame.from_dict(d) + var3 = df.var3.unique() + var3 = np.sort(var3) + new = DataFrame.from_dict( + {"var3": var3, "var8": np.random.default_rng(2).random(7)} + ) + + result = df.merge(new, on="var3", sort=False) + exp = merge(df, new, on="var3", sort=False) + tm.assert_frame_equal(result, exp) + + assert (df.var3.unique() == result.var3.unique()).all() + + @pytest.mark.parametrize( + ("sort", "values"), [(False, [1, 1, 0, 1, 1]), (True, [0, 1, 1, 1, 1])] + ) + @pytest.mark.parametrize("how", ["left", "right"]) + def test_merge_same_order_left_right(self, sort, values, how): + # GH#35382 + df = DataFrame({"a": [1, 0, 1]}) + + result = df.merge(df, on="a", how=how, sort=sort) + expected = DataFrame(values, columns=["a"]) + tm.assert_frame_equal(result, expected) + + def test_merge_nan_right(self): + df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]}) + df2 = DataFrame({"i1": [0], "i3": [0]}) + result = df1.join(df2, on="i1", rsuffix="_") + expected = ( + DataFrame( + { + "i1": {0: 0.0, 1: 1}, + "i2": {0: 0, 1: 1}, + "i1_": {0: 0, 1: np.nan}, + "i3": {0: 0.0, 1: np.nan}, + None: {0: 0, 1: 0}, + }, + columns=Index(["i1", "i2", "i1_", "i3", None], dtype=object), + ) + .set_index(None) + .reset_index()[["i1", "i2", "i1_", "i3"]] + ) + result.columns = result.columns.astype("object") + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_merge_nan_right2(self): + df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]}) + df2 = DataFrame({"i1": [0], "i3": [0.7]}) + result = df1.join(df2, rsuffix="_", on="i1") + expected = DataFrame( + { + "i1": {0: 0, 1: 1}, + "i1_": {0: 0.0, 1: np.nan}, + "i2": {0: 0.5, 1: 1.5}, + "i3": {0: 0.69999999999999996, 1: np.nan}, + } + )[["i1", "i2", "i1_", "i3"]] + 
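# the unmatched row introduces NaN, so the joined integer column "i1_" is + # upcast to float64; 0.69999999999999996 is just 0.7's binary-float value +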
tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" + ) + def test_merge_type(self, df, df2): + class NotADataFrame(DataFrame): + @property + def _constructor(self): + return NotADataFrame + + nad = NotADataFrame(df) + result = nad.merge(df2, on="key1") + + assert isinstance(result, NotADataFrame) + + def test_join_append_timedeltas(self, using_array_manager): + # timedelta64 issues with join/merge + # GH 5695 + + d = DataFrame.from_dict( + {"d": [datetime(2013, 11, 5, 5, 56)], "t": [timedelta(0, 22500)]} + ) + df = DataFrame(columns=list("dt")) + msg = "The behavior of DataFrame concatenation with empty or all-NA entries" + warn = FutureWarning + if using_array_manager: + warn = None + with tm.assert_produces_warning(warn, match=msg): + df = concat([df, d], ignore_index=True) + result = concat([df, d], ignore_index=True) + expected = DataFrame( + { + "d": [datetime(2013, 11, 5, 5, 56), datetime(2013, 11, 5, 5, 56)], + "t": [timedelta(0, 22500), timedelta(0, 22500)], + } + ) + if using_array_manager: + # TODO(ArrayManager) decide on exact casting rules in concat + expected = expected.astype(object) + tm.assert_frame_equal(result, expected) + + def test_join_append_timedeltas2(self): + # timedelta64 issues with join/merge + # GH 5695 + td = np.timedelta64(300000000) + lhs = DataFrame(Series([td, td], index=["A", "B"])) + rhs = DataFrame(Series([td], index=["A"])) + + result = lhs.join(rhs, rsuffix="r", how="left") + expected = DataFrame( + { + "0": Series([td, td], index=list("AB")), + "0r": Series([td, pd.NaT], index=list("AB")), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"]) + def test_other_datetime_unit(self, unit): + # GH 13389 + df1 = DataFrame({"entity_id": [101, 102]}) + ser = Series([None, None], index=[101, 102], name="days") + + dtype = f"datetime64[{unit}]" + + if unit in ["D", "h", "m"]: + # not supported so we cast to the nearest supported unit, seconds + exp_dtype = "datetime64[s]" + else: + exp_dtype = dtype + df2 = ser.astype(exp_dtype).to_frame("days") + assert df2["days"].dtype == exp_dtype + + result = df1.merge(df2, left_on="entity_id", right_index=True) + + days = np.array(["nat", "nat"], dtype=exp_dtype) + days = pd.core.arrays.DatetimeArray._simple_new(days, dtype=days.dtype) + exp = DataFrame( + { + "entity_id": [101, 102], + "days": days, + }, + columns=["entity_id", "days"], + ) + assert exp["days"].dtype == exp_dtype + tm.assert_frame_equal(result, exp) + + @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"]) + def test_other_timedelta_unit(self, unit): + # GH 13389 + df1 = DataFrame({"entity_id": [101, 102]}) + ser = Series([None, None], index=[101, 102], name="days") + + dtype = f"m8[{unit}]" + if unit in ["D", "h", "m"]: + # We cannot astype, instead do nearest supported unit, i.e. 
"s" + msg = "Supported resolutions are 's', 'ms', 'us', 'ns'" + with pytest.raises(ValueError, match=msg): + ser.astype(dtype) + + df2 = ser.astype("m8[s]").to_frame("days") + else: + df2 = ser.astype(dtype).to_frame("days") + assert df2["days"].dtype == dtype + + result = df1.merge(df2, left_on="entity_id", right_index=True) + + exp = DataFrame( + {"entity_id": [101, 102], "days": np.array(["nat", "nat"], dtype=dtype)}, + columns=["entity_id", "days"], + ) + tm.assert_frame_equal(result, exp) + + def test_overlapping_columns_error_message(self): + df = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]}) + df2 = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]}) + + df.columns = ["key", "foo", "foo"] + df2.columns = ["key", "bar", "bar"] + expected = DataFrame( + { + "key": [1, 2, 3], + "v1": [4, 5, 6], + "v2": [7, 8, 9], + "v3": [4, 5, 6], + "v4": [7, 8, 9], + } + ) + expected.columns = ["key", "foo", "foo", "bar", "bar"] + tm.assert_frame_equal(merge(df, df2), expected) + + # #2649, #10639 + df2.columns = ["key1", "foo", "foo"] + msg = r"Data columns not unique: Index\(\['foo'\], dtype='object|string'\)" + with pytest.raises(MergeError, match=msg): + merge(df, df2) + + def test_merge_on_datetime64tz(self): + # GH11405 + left = DataFrame( + { + "key": pd.date_range("20151010", periods=2, tz="US/Eastern"), + "value": [1, 2], + } + ) + right = DataFrame( + { + "key": pd.date_range("20151011", periods=3, tz="US/Eastern"), + "value": [1, 2, 3], + } + ) + + expected = DataFrame( + { + "key": pd.date_range("20151010", periods=4, tz="US/Eastern"), + "value_x": [1, 2, np.nan, np.nan], + "value_y": [np.nan, 1, 2, 3], + } + ) + result = merge(left, right, on="key", how="outer") + tm.assert_frame_equal(result, expected) + + def test_merge_datetime64tz_values(self): + left = DataFrame( + { + "key": [1, 2], + "value": pd.date_range("20151010", periods=2, tz="US/Eastern"), + } + ) + right = DataFrame( + { + "key": [2, 3], + "value": pd.date_range("20151011", periods=2, tz="US/Eastern"), + } + ) + expected = DataFrame( + { + "key": [1, 2, 3], + "value_x": list(pd.date_range("20151010", periods=2, tz="US/Eastern")) + + [pd.NaT], + "value_y": [pd.NaT] + + list(pd.date_range("20151011", periods=2, tz="US/Eastern")), + } + ) + result = merge(left, right, on="key", how="outer") + tm.assert_frame_equal(result, expected) + assert result["value_x"].dtype == "datetime64[ns, US/Eastern]" + assert result["value_y"].dtype == "datetime64[ns, US/Eastern]" + + def test_merge_on_datetime64tz_empty(self): + # https://github.com/pandas-dev/pandas/issues/25014 + dtz = pd.DatetimeTZDtype(tz="UTC") + right = DataFrame( + { + "date": DatetimeIndex(["2018"], dtype=dtz), + "value": [4.0], + "date2": DatetimeIndex(["2019"], dtype=dtz), + }, + columns=["date", "value", "date2"], + ) + left = right[:0] + result = left.merge(right, on="date") + expected = DataFrame( + { + "date": Series(dtype=dtz), + "value_x": Series(dtype=float), + "date2_x": Series(dtype=dtz), + "value_y": Series(dtype=float), + "date2_y": Series(dtype=dtz), + }, + columns=["date", "value_x", "date2_x", "value_y", "date2_y"], + ) + tm.assert_frame_equal(result, expected) + + def test_merge_datetime64tz_with_dst_transition(self): + # GH 18885 + df1 = DataFrame( + pd.date_range("2017-10-29 01:00", periods=4, freq="h", tz="Europe/Madrid"), + columns=["date"], + ) + df1["value"] = 1 + df2 = DataFrame( + { + "date": pd.to_datetime( + [ + "2017-10-29 03:00:00", + "2017-10-29 04:00:00", + "2017-10-29 05:00:00", + ] + ), + "value": 2, + } + ) + 
df2["date"] = df2["date"].dt.tz_localize("UTC").dt.tz_convert("Europe/Madrid") + result = merge(df1, df2, how="outer", on="date") + expected = DataFrame( + { + "date": pd.date_range( + "2017-10-29 01:00", periods=7, freq="h", tz="Europe/Madrid" + ), + "value_x": [1] * 4 + [np.nan] * 3, + "value_y": [np.nan] * 4 + [2] * 3, + } + ) + tm.assert_frame_equal(result, expected) + + def test_merge_non_unique_period_index(self): + # GH #16871 + index = pd.period_range("2016-01-01", periods=16, freq="M") + df = DataFrame(list(range(len(index))), index=index, columns=["pnum"]) + df2 = concat([df, df]) + result = df.merge(df2, left_index=True, right_index=True, how="inner") + expected = DataFrame( + np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2), + columns=["pnum_x", "pnum_y"], + index=df2.sort_index().index, + ) + tm.assert_frame_equal(result, expected) + + def test_merge_on_periods(self): + left = DataFrame( + {"key": pd.period_range("20151010", periods=2, freq="D"), "value": [1, 2]} + ) + right = DataFrame( + { + "key": pd.period_range("20151011", periods=3, freq="D"), + "value": [1, 2, 3], + } + ) + + expected = DataFrame( + { + "key": pd.period_range("20151010", periods=4, freq="D"), + "value_x": [1, 2, np.nan, np.nan], + "value_y": [np.nan, 1, 2, 3], + } + ) + result = merge(left, right, on="key", how="outer") + tm.assert_frame_equal(result, expected) + + def test_merge_period_values(self): + left = DataFrame( + {"key": [1, 2], "value": pd.period_range("20151010", periods=2, freq="D")} + ) + right = DataFrame( + {"key": [2, 3], "value": pd.period_range("20151011", periods=2, freq="D")} + ) + + exp_x = pd.period_range("20151010", periods=2, freq="D") + exp_y = pd.period_range("20151011", periods=2, freq="D") + expected = DataFrame( + { + "key": [1, 2, 3], + "value_x": list(exp_x) + [pd.NaT], + "value_y": [pd.NaT] + list(exp_y), + } + ) + result = merge(left, right, on="key", how="outer") + tm.assert_frame_equal(result, expected) + assert result["value_x"].dtype == "Period[D]" + assert result["value_y"].dtype == "Period[D]" + + def test_indicator(self, dfs_for_indicator): + # PR #10054. xref #7412 and closes #8790. 
+ df1, df2 = dfs_for_indicator + df1_copy = df1.copy() + + df2_copy = df2.copy() + + df_result = DataFrame( + { + "col1": [0, 1, 2, 3, 4, 5], + "col_conflict_x": [1, 2, np.nan, np.nan, np.nan, np.nan], + "col_left": ["a", "b", np.nan, np.nan, np.nan, np.nan], + "col_conflict_y": [np.nan, 1, 2, 3, 4, 5], + "col_right": [np.nan, 2, 2, 2, 2, 2], + } + ) + df_result["_merge"] = Categorical( + [ + "left_only", + "both", + "right_only", + "right_only", + "right_only", + "right_only", + ], + categories=["left_only", "right_only", "both"], + ) + + df_result = df_result[ + [ + "col1", + "col_conflict_x", + "col_left", + "col_conflict_y", + "col_right", + "_merge", + ] + ] + + test = merge(df1, df2, on="col1", how="outer", indicator=True) + tm.assert_frame_equal(test, df_result) + test = df1.merge(df2, on="col1", how="outer", indicator=True) + tm.assert_frame_equal(test, df_result) + + # No side effects + tm.assert_frame_equal(df1, df1_copy) + tm.assert_frame_equal(df2, df2_copy) + + # Check with custom name + df_result_custom_name = df_result + df_result_custom_name = df_result_custom_name.rename( + columns={"_merge": "custom_name"} + ) + + test_custom_name = merge( + df1, df2, on="col1", how="outer", indicator="custom_name" + ) + tm.assert_frame_equal(test_custom_name, df_result_custom_name) + test_custom_name = df1.merge( + df2, on="col1", how="outer", indicator="custom_name" + ) + tm.assert_frame_equal(test_custom_name, df_result_custom_name) + + def test_merge_indicator_arg_validation(self, dfs_for_indicator): + # Check only accepts strings and booleans + df1, df2 = dfs_for_indicator + + msg = "indicator option can only accept boolean or string arguments" + with pytest.raises(ValueError, match=msg): + merge(df1, df2, on="col1", how="outer", indicator=5) + with pytest.raises(ValueError, match=msg): + df1.merge(df2, on="col1", how="outer", indicator=5) + + def test_merge_indicator_result_integrity(self, dfs_for_indicator): + # Check result integrity + df1, df2 = dfs_for_indicator + + test2 = merge(df1, df2, on="col1", how="left", indicator=True) + assert (test2._merge != "right_only").all() + test2 = df1.merge(df2, on="col1", how="left", indicator=True) + assert (test2._merge != "right_only").all() + + test3 = merge(df1, df2, on="col1", how="right", indicator=True) + assert (test3._merge != "left_only").all() + test3 = df1.merge(df2, on="col1", how="right", indicator=True) + assert (test3._merge != "left_only").all() + + test4 = merge(df1, df2, on="col1", how="inner", indicator=True) + assert (test4._merge == "both").all() + test4 = df1.merge(df2, on="col1", how="inner", indicator=True) + assert (test4._merge == "both").all() + + def test_merge_indicator_invalid(self, dfs_for_indicator): + # Check if working name in df + df1, _ = dfs_for_indicator + + for i in ["_right_indicator", "_left_indicator", "_merge"]: + df_badcolumn = DataFrame({"col1": [1, 2], i: [2, 2]}) + + msg = ( + "Cannot use `indicator=True` option when data contains a " + f"column named {i}|" + "Cannot use name of an existing column for indicator column" + ) + with pytest.raises(ValueError, match=msg): + merge(df1, df_badcolumn, on="col1", how="outer", indicator=True) + with pytest.raises(ValueError, match=msg): + df1.merge(df_badcolumn, on="col1", how="outer", indicator=True) + + # Check for name conflict with custom name + df_badcolumn = DataFrame({"col1": [1, 2], "custom_column_name": [2, 2]}) + + msg = "Cannot use name of an existing column for indicator column" + with pytest.raises(ValueError, match=msg): + merge( + df1, + 
df_badcolumn, + on="col1", + how="outer", + indicator="custom_column_name", + ) + with pytest.raises(ValueError, match=msg): + df1.merge( + df_badcolumn, on="col1", how="outer", indicator="custom_column_name" + ) + + def test_merge_indicator_multiple_columns(self): + # Merge on multiple columns + df3 = DataFrame({"col1": [0, 1], "col2": ["a", "b"]}) + + df4 = DataFrame({"col1": [1, 1, 3], "col2": ["b", "x", "y"]}) + + hand_coded_result = DataFrame( + {"col1": [0, 1, 1, 3], "col2": ["a", "b", "x", "y"]} + ) + hand_coded_result["_merge"] = Categorical( + ["left_only", "both", "right_only", "right_only"], + categories=["left_only", "right_only", "both"], + ) + + test5 = merge(df3, df4, on=["col1", "col2"], how="outer", indicator=True) + tm.assert_frame_equal(test5, hand_coded_result) + test5 = df3.merge(df4, on=["col1", "col2"], how="outer", indicator=True) + tm.assert_frame_equal(test5, hand_coded_result) + + def test_validation(self): + left = DataFrame( + {"a": ["a", "b", "c", "d"], "b": ["cat", "dog", "weasel", "horse"]}, + index=range(4), + ) + + right = DataFrame( + { + "a": ["a", "b", "c", "d", "e"], + "c": ["meow", "bark", "um... weasel noise?", "nay", "chirp"], + }, + index=range(5), + ) + + # Make sure no side effects. + left_copy = left.copy() + right_copy = right.copy() + + result = merge(left, right, left_index=True, right_index=True, validate="1:1") + tm.assert_frame_equal(left, left_copy) + tm.assert_frame_equal(right, right_copy) + + # make sure merge still correct + expected = DataFrame( + { + "a_x": ["a", "b", "c", "d"], + "b": ["cat", "dog", "weasel", "horse"], + "a_y": ["a", "b", "c", "d"], + "c": ["meow", "bark", "um... weasel noise?", "nay"], + }, + index=range(4), + columns=["a_x", "b", "a_y", "c"], + ) + + result = merge( + left, right, left_index=True, right_index=True, validate="one_to_one" + ) + tm.assert_frame_equal(result, expected) + + expected_2 = DataFrame( + { + "a": ["a", "b", "c", "d"], + "b": ["cat", "dog", "weasel", "horse"], + "c": ["meow", "bark", "um... weasel noise?", "nay"], + }, + index=range(4), + ) + + result = merge(left, right, on="a", validate="1:1") + tm.assert_frame_equal(left, left_copy) + tm.assert_frame_equal(right, right_copy) + tm.assert_frame_equal(result, expected_2) + + result = merge(left, right, on="a", validate="one_to_one") + tm.assert_frame_equal(result, expected_2) + + # One index, one column + expected_3 = DataFrame( + { + "b": ["cat", "dog", "weasel", "horse"], + "a": ["a", "b", "c", "d"], + "c": ["meow", "bark", "um... 
weasel noise?", "nay"], + }, + columns=["b", "a", "c"], + index=range(4), + ) + + left_index_reset = left.set_index("a") + result = merge( + left_index_reset, + right, + left_index=True, + right_on="a", + validate="one_to_one", + ) + tm.assert_frame_equal(result, expected_3) + + # Dups on right + right_w_dups = concat([right, DataFrame({"a": ["e"], "c": ["moo"]}, index=[4])]) + merge( + left, + right_w_dups, + left_index=True, + right_index=True, + validate="one_to_many", + ) + + msg = "Merge keys are not unique in right dataset; not a one-to-one merge" + with pytest.raises(MergeError, match=msg): + merge( + left, + right_w_dups, + left_index=True, + right_index=True, + validate="one_to_one", + ) + + with pytest.raises(MergeError, match=msg): + merge(left, right_w_dups, on="a", validate="one_to_one") + + # Dups on left + left_w_dups = concat( + [left, DataFrame({"a": ["a"], "c": ["cow"]}, index=[3])], sort=True + ) + merge( + left_w_dups, + right, + left_index=True, + right_index=True, + validate="many_to_one", + ) + + msg = "Merge keys are not unique in left dataset; not a one-to-one merge" + with pytest.raises(MergeError, match=msg): + merge( + left_w_dups, + right, + left_index=True, + right_index=True, + validate="one_to_one", + ) + + with pytest.raises(MergeError, match=msg): + merge(left_w_dups, right, on="a", validate="one_to_one") + + # Dups on both + merge(left_w_dups, right_w_dups, on="a", validate="many_to_many") + + msg = "Merge keys are not unique in right dataset; not a many-to-one merge" + with pytest.raises(MergeError, match=msg): + merge( + left_w_dups, + right_w_dups, + left_index=True, + right_index=True, + validate="many_to_one", + ) + + msg = "Merge keys are not unique in left dataset; not a one-to-many merge" + with pytest.raises(MergeError, match=msg): + merge(left_w_dups, right_w_dups, on="a", validate="one_to_many") + + # Check invalid arguments + msg = ( + '"jibberish" is not a valid argument. ' + "Valid arguments are:\n" + '- "1:1"\n' + '- "1:m"\n' + '- "m:1"\n' + '- "m:m"\n' + '- "one_to_one"\n' + '- "one_to_many"\n' + '- "many_to_one"\n' + '- "many_to_many"' + ) + with pytest.raises(ValueError, match=msg): + merge(left, right, on="a", validate="jibberish") + + # Two column merge, dups in both, but jointly no dups. + left = DataFrame( + { + "a": ["a", "a", "b", "b"], + "b": [0, 1, 0, 1], + "c": ["cat", "dog", "weasel", "horse"], + }, + index=range(4), + ) + + right = DataFrame( + { + "a": ["a", "a", "b"], + "b": [0, 1, 0], + "d": ["meow", "bark", "um... weasel noise?"], + }, + index=range(3), + ) + + expected_multi = DataFrame( + { + "a": ["a", "a", "b"], + "b": [0, 1, 0], + "c": ["cat", "dog", "weasel"], + "d": ["meow", "bark", "um... 
weasel noise?"], + }, + index=range(3), + ) + + msg = ( + "Merge keys are not unique in either left or right dataset; " + "not a one-to-one merge" + ) + with pytest.raises(MergeError, match=msg): + merge(left, right, on="a", validate="1:1") + + result = merge(left, right, on=["a", "b"], validate="1:1") + tm.assert_frame_equal(result, expected_multi) + + def test_merge_two_empty_df_no_division_error(self): + # GH17776, PR #17846 + a = DataFrame({"a": [], "b": [], "c": []}) + with np.errstate(divide="raise"): + merge(a, a, on=("a", "b")) + + @pytest.mark.parametrize("how", ["right", "outer"]) + @pytest.mark.parametrize( + "index,expected_index", + [ + ( + CategoricalIndex([1, 2, 4]), + CategoricalIndex([1, 2, 4, None, None, None]), + ), + ( + DatetimeIndex( + ["2001-01-01", "2002-02-02", "2003-03-03"], dtype="M8[ns]" + ), + DatetimeIndex( + ["2001-01-01", "2002-02-02", "2003-03-03", pd.NaT, pd.NaT, pd.NaT], + dtype="M8[ns]", + ), + ), + *[ + ( + Index([1, 2, 3], dtype=dtyp), + Index([1, 2, 3, None, None, None], dtype=np.float64), + ) + for dtyp in tm.ALL_REAL_NUMPY_DTYPES + ], + ( + IntervalIndex.from_tuples([(1, 2), (2, 3), (3, 4)]), + IntervalIndex.from_tuples( + [(1, 2), (2, 3), (3, 4), np.nan, np.nan, np.nan] + ), + ), + ( + PeriodIndex(["2001-01-01", "2001-01-02", "2001-01-03"], freq="D"), + PeriodIndex( + ["2001-01-01", "2001-01-02", "2001-01-03", pd.NaT, pd.NaT, pd.NaT], + freq="D", + ), + ), + ( + TimedeltaIndex(["1d", "2d", "3d"]), + TimedeltaIndex(["1d", "2d", "3d", pd.NaT, pd.NaT, pd.NaT]), + ), + ], + ) + def test_merge_on_index_with_more_values(self, how, index, expected_index): + # GH 24212 + # pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that + # -1 is interpreted as a missing value instead of the last element + df1 = DataFrame({"a": [0, 1, 2], "key": [0, 1, 2]}, index=index) + df2 = DataFrame({"b": [0, 1, 2, 3, 4, 5]}) + result = df1.merge(df2, left_on="key", right_index=True, how=how) + expected = DataFrame( + [ + [0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [np.nan, 3, 3], + [np.nan, 4, 4], + [np.nan, 5, 5], + ], + columns=["a", "key", "b"], + ) + expected.set_index(expected_index, inplace=True) + tm.assert_frame_equal(result, expected) + + def test_merge_right_index_right(self): + # Note: the expected output here is probably incorrect. + # See https://github.com/pandas-dev/pandas/issues/17257 for more. + # We include this as a regression test for GH-24897. 
+ left = DataFrame({"a": [1, 2, 3], "key": [0, 1, 1]}) + right = DataFrame({"b": [1, 2, 3]}) + + expected = DataFrame( + {"a": [1, 2, 3, None], "key": [0, 1, 1, 2], "b": [1, 2, 2, 3]}, + columns=["a", "key", "b"], + index=[0, 1, 2, np.nan], + ) + result = left.merge(right, left_on="key", right_index=True, how="right") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("how", ["left", "right"]) + def test_merge_preserves_row_order(self, how): + # GH 27453 + left_df = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]}) + right_df = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]}) + result = left_df.merge(right_df, on=["animal", "max_speed"], how=how) + if how == "right": + expected = DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]}) + else: + expected = DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]}) + tm.assert_frame_equal(result, expected) + + def test_merge_take_missing_values_from_index_of_other_dtype(self): + # GH 24212 + left = DataFrame( + { + "a": [1, 2, 3], + "key": Categorical(["a", "a", "b"], categories=list("abc")), + } + ) + right = DataFrame({"b": [1, 2, 3]}, index=CategoricalIndex(["a", "b", "c"])) + result = left.merge(right, left_on="key", right_index=True, how="right") + expected = DataFrame( + { + "a": [1, 2, 3, None], + "key": Categorical(["a", "a", "b", "c"]), + "b": [1, 1, 2, 3], + }, + index=[0, 1, 2, np.nan], + ) + expected = expected.reindex(columns=["a", "key", "b"]) + tm.assert_frame_equal(result, expected) + + def test_merge_readonly(self): + # https://github.com/pandas-dev/pandas/issues/27943 + data1 = DataFrame( + np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"] + ) + data2 = DataFrame( + np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"] + ) + + # make each underlying block array / column array read-only + for arr in data1._mgr.arrays: + arr.flags.writeable = False + + data1.merge(data2) # no error + + +def _check_merge(x, y): + for how in ["inner", "left", "outer"]: + for sort in [True, False]: + result = x.join(y, how=how, sort=sort) + + expected = merge(x.reset_index(), y.reset_index(), how=how, sort=sort) + expected = expected.set_index("index") + + # TODO check_names on merge? 
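+ # Editorial note: check_names=False is presumably needed because the + # reset_index()/set_index("index") round-trip leaves the index named "index" + # while DataFrame.join keeps the original index name; the values agree and + # only that name metadata differs.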
+ tm.assert_frame_equal(result, expected, check_names=False) + + +class TestMergeDtypes: + @pytest.mark.parametrize( + "right_vals", [["foo", "bar"], Series(["foo", "bar"]).astype("category")] + ) + def test_different(self, right_vals): + left = DataFrame( + { + "A": ["foo", "bar"], + "B": Series(["foo", "bar"]).astype("category"), + "C": [1, 2], + "D": [1.0, 2.0], + "E": Series([1, 2], dtype="uint64"), + "F": Series([1, 2], dtype="int32"), + } + ) + right = DataFrame({"A": right_vals}) + + # GH 9780 + # We allow merging on object and categorical cols and cast + # categorical cols to object + result = merge(left, right, on="A") + assert is_object_dtype(result.A.dtype) or is_string_dtype(result.A.dtype) + + @pytest.mark.parametrize( + "d1", [np.int64, np.int32, np.intc, np.int16, np.int8, np.uint8] + ) + @pytest.mark.parametrize("d2", [np.int64, np.float64, np.float32, np.float16]) + def test_join_multi_dtypes(self, d1, d2): + dtype1 = np.dtype(d1) + dtype2 = np.dtype(d2) + + left = DataFrame( + { + "k1": np.array([0, 1, 2] * 8, dtype=dtype1), + "k2": ["foo", "bar"] * 12, + "v": np.array(np.arange(24), dtype=np.int64), + } + ) + + index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")]) + right = DataFrame({"v2": np.array([5, 7], dtype=dtype2)}, index=index) + + result = left.join(right, on=["k1", "k2"]) + + expected = left.copy() + + if dtype2.kind == "i": + dtype2 = np.dtype("float64") + expected["v2"] = np.array(np.nan, dtype=dtype2) + expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5 + expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7 + + tm.assert_frame_equal(result, expected) + + result = left.join(right, on=["k1", "k2"], sort=True) + expected.sort_values(["k1", "k2"], kind="mergesort", inplace=True) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "int_vals, float_vals, exp_vals", + [ + ([1, 2, 3], [1.0, 2.0, 3.0], {"X": [1, 2, 3], "Y": [1.0, 2.0, 3.0]}), + ([1, 2, 3], [1.0, 3.0], {"X": [1, 3], "Y": [1.0, 3.0]}), + ([1, 2], [1.0, 2.0, 3.0], {"X": [1, 2], "Y": [1.0, 2.0]}), + ], + ) + def test_merge_on_ints_floats(self, int_vals, float_vals, exp_vals): + # GH 16572 + # Check that float column is not cast to object if + # merging on float and int columns + A = DataFrame({"X": int_vals}) + B = DataFrame({"Y": float_vals}) + expected = DataFrame(exp_vals) + + result = A.merge(B, left_on="X", right_on="Y") + tm.assert_frame_equal(result, expected) + + result = B.merge(A, left_on="Y", right_on="X") + tm.assert_frame_equal(result, expected[["Y", "X"]]) + + def test_merge_key_dtype_cast(self): + # GH 17044 + df1 = DataFrame({"key": [1.0, 2.0], "v1": [10, 20]}, columns=["key", "v1"]) + df2 = DataFrame({"key": [2], "v2": [200]}, columns=["key", "v2"]) + result = df1.merge(df2, on="key", how="left") + expected = DataFrame( + {"key": [1.0, 2.0], "v1": [10, 20], "v2": [np.nan, 200.0]}, + columns=["key", "v1", "v2"], + ) + tm.assert_frame_equal(result, expected) + + def test_merge_on_ints_floats_warning(self): + # GH 16572 + # merge will produce a warning when merging on int and + # float columns where the float values are not exactly + # equal to their int representation + A = DataFrame({"X": [1, 2, 3]}) + B = DataFrame({"Y": [1.1, 2.5, 3.0]}) + expected = DataFrame({"X": [3], "Y": [3.0]}) + + with tm.assert_produces_warning(UserWarning): + result = A.merge(B, left_on="X", right_on="Y") + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(UserWarning): + result = B.merge(A, left_on="Y", right_on="X") + 
tm.assert_frame_equal(result, expected[["Y", "X"]]) + + # test no warning if float has NaNs + B = DataFrame({"Y": [np.nan, np.nan, 3.0]}) + + with tm.assert_produces_warning(None): + result = B.merge(A, left_on="Y", right_on="X") + tm.assert_frame_equal(result, expected[["Y", "X"]]) + + def test_merge_incompat_infer_boolean_object(self): + # GH21119: bool + object bool merge OK + df1 = DataFrame({"key": Series([True, False], dtype=object)}) + df2 = DataFrame({"key": [True, False]}) + + expected = DataFrame({"key": [True, False]}, dtype=object) + result = merge(df1, df2, on="key") + tm.assert_frame_equal(result, expected) + result = merge(df2, df1, on="key") + tm.assert_frame_equal(result, expected) + + def test_merge_incompat_infer_boolean_object_with_missing(self): + # GH21119: bool + object bool merge OK + # with missing value + df1 = DataFrame({"key": Series([True, False, np.nan], dtype=object)}) + df2 = DataFrame({"key": [True, False]}) + + expected = DataFrame({"key": [True, False]}, dtype=object) + result = merge(df1, df2, on="key") + tm.assert_frame_equal(result, expected) + result = merge(df2, df1, on="key") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "df1_vals, df2_vals", + [ + # merge on category coerces to object + ([0, 1, 2], Series(["a", "b", "a"]).astype("category")), + ([0.0, 1.0, 2.0], Series(["a", "b", "a"]).astype("category")), + # no not infer + ([0, 1], Series([False, True], dtype=object)), + ([0, 1], Series([False, True], dtype=bool)), + ], + ) + def test_merge_incompat_dtypes_are_ok(self, df1_vals, df2_vals): + # these are explicitly allowed incompat merges, that pass thru + # the result type is dependent on if the values on the rhs are + # inferred, otherwise these will be coerced to object + + df1 = DataFrame({"A": df1_vals}) + df2 = DataFrame({"A": df2_vals}) + + result = merge(df1, df2, on=["A"]) + assert is_object_dtype(result.A.dtype) + result = merge(df2, df1, on=["A"]) + assert is_object_dtype(result.A.dtype) or is_string_dtype(result.A.dtype) + + @pytest.mark.parametrize( + "df1_vals, df2_vals", + [ + # do not infer to numeric + (Series([1, 2], dtype="uint64"), ["a", "b", "c"]), + (Series([1, 2], dtype="int32"), ["a", "b", "c"]), + ([0, 1, 2], ["0", "1", "2"]), + ([0.0, 1.0, 2.0], ["0", "1", "2"]), + ([0, 1, 2], ["0", "1", "2"]), + ( + pd.date_range("1/1/2011", periods=2, freq="D"), + ["2011-01-01", "2011-01-02"], + ), + (pd.date_range("1/1/2011", periods=2, freq="D"), [0, 1]), + (pd.date_range("1/1/2011", periods=2, freq="D"), [0.0, 1.0]), + ( + pd.date_range("20130101", periods=3), + pd.date_range("20130101", periods=3, tz="US/Eastern"), + ), + ], + ) + def test_merge_incompat_dtypes_error(self, df1_vals, df2_vals): + # GH 9780, GH 15800 + # Raise a ValueError when a user tries to merge on + # dtypes that are incompatible (e.g., obj and int/float) + + df1 = DataFrame({"A": df1_vals}) + df2 = DataFrame({"A": df2_vals}) + + msg = ( + f"You are trying to merge on {df1['A'].dtype} and {df2['A'].dtype} " + "columns for key 'A'. If you wish to proceed you should use pd.concat" + ) + msg = re.escape(msg) + with pytest.raises(ValueError, match=msg): + merge(df1, df2, on=["A"]) + + # Check that error still raised when swapping order of dataframes + msg = ( + f"You are trying to merge on {df2['A'].dtype} and {df1['A'].dtype} " + "columns for key 'A'. 
If you wish to proceed you should use pd.concat" + ) + msg = re.escape(msg) + with pytest.raises(ValueError, match=msg): + merge(df2, df1, on=["A"]) + + # Check that error still raised when merging on multiple columns + # The error message should mention the first incompatible column + if len(df1_vals) == len(df2_vals): + # Column A in df1 and df2 is of compatible (the same) dtype + # Columns B and C in df1 and df2 are of incompatible dtypes + df3 = DataFrame({"A": df2_vals, "B": df1_vals, "C": df1_vals}) + df4 = DataFrame({"A": df2_vals, "B": df2_vals, "C": df2_vals}) + + # Check that error raised correctly when merging all columns A, B, and C + # The error message should mention key 'B' + msg = ( + f"You are trying to merge on {df3['B'].dtype} and {df4['B'].dtype} " + "columns for key 'B'. If you wish to proceed you should use pd.concat" + ) + msg = re.escape(msg) + with pytest.raises(ValueError, match=msg): + merge(df3, df4) + + # Check that error raised correctly when merging columns A and C + # The error message should mention key 'C' + msg = ( + f"You are trying to merge on {df3['C'].dtype} and {df4['C'].dtype} " + "columns for key 'C'. If you wish to proceed you should use pd.concat" + ) + msg = re.escape(msg) + with pytest.raises(ValueError, match=msg): + merge(df3, df4, on=["A", "C"]) + + @pytest.mark.parametrize( + "expected_data, how", + [ + ([1, 2], "outer"), + ([], "inner"), + ([2], "right"), + ([1], "left"), + ], + ) + def test_merge_EA_dtype(self, any_numeric_ea_dtype, how, expected_data): + # GH#40073 + d1 = DataFrame([(1,)], columns=["id"], dtype=any_numeric_ea_dtype) + d2 = DataFrame([(2,)], columns=["id"], dtype=any_numeric_ea_dtype) + result = merge(d1, d2, how=how) + exp_index = RangeIndex(len(expected_data)) + expected = DataFrame( + expected_data, index=exp_index, columns=["id"], dtype=any_numeric_ea_dtype + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "expected_data, how", + [ + (["a", "b"], "outer"), + ([], "inner"), + (["b"], "right"), + (["a"], "left"), + ], + ) + def test_merge_string_dtype(self, how, expected_data, any_string_dtype): + # GH#40073 + d1 = DataFrame([("a",)], columns=["id"], dtype=any_string_dtype) + d2 = DataFrame([("b",)], columns=["id"], dtype=any_string_dtype) + result = merge(d1, d2, how=how) + exp_idx = RangeIndex(len(expected_data)) + expected = DataFrame( + expected_data, index=exp_idx, columns=["id"], dtype=any_string_dtype + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "how, expected_data", + [ + ("inner", [[True, 1, 4], [False, 5, 3]]), + ("outer", [[False, 5, 3], [True, 1, 4]]), + ("left", [[True, 1, 4], [False, 5, 3]]), + ("right", [[False, 5, 3], [True, 1, 4]]), + ], + ) + def test_merge_bool_dtype(self, how, expected_data): + # GH#40073 + df1 = DataFrame({"A": [True, False], "B": [1, 5]}) + df2 = DataFrame({"A": [False, True], "C": [3, 4]}) + result = merge(df1, df2, how=how) + expected = DataFrame(expected_data, columns=["A", "B", "C"]) + tm.assert_frame_equal(result, expected) + + def test_merge_ea_with_string(self, join_type, string_dtype): + # GH 43734 Avoid the use of `assign` with multi-index + df1 = DataFrame( + data={ + ("lvl0", "lvl1-a"): ["1", "2", "3", "4", None], + ("lvl0", "lvl1-b"): ["4", "5", "6", "7", "8"], + }, + dtype=pd.StringDtype(), + ) + df1_copy = df1.copy() + df2 = DataFrame( + data={ + ("lvl0", "lvl1-a"): ["1", "2", "3", pd.NA, "5"], + ("lvl0", "lvl1-c"): ["7", "8", "9", pd.NA, "11"], + }, + dtype=string_dtype, + ) + df2_copy = df2.copy() + merged 
= merge(left=df1, right=df2, on=[("lvl0", "lvl1-a")], how=join_type) + + # No change in df1 and df2 + tm.assert_frame_equal(df1, df1_copy) + tm.assert_frame_equal(df2, df2_copy) + + # Check the expected types for the merged data frame + expected = Series( + [np.dtype("O"), pd.StringDtype(), np.dtype("O")], + index=MultiIndex.from_tuples( + [("lvl0", "lvl1-a"), ("lvl0", "lvl1-b"), ("lvl0", "lvl1-c")] + ), + ) + tm.assert_series_equal(merged.dtypes, expected) + + @pytest.mark.parametrize( + "left_empty, how, exp", + [ + (False, "left", "left"), + (False, "right", "empty"), + (False, "inner", "empty"), + (False, "outer", "left"), + (False, "cross", "empty_cross"), + (True, "left", "empty"), + (True, "right", "right"), + (True, "inner", "empty"), + (True, "outer", "right"), + (True, "cross", "empty_cross"), + ], + ) + def test_merge_empty(self, left_empty, how, exp): + left = DataFrame({"A": [2, 1], "B": [3, 4]}) + right = DataFrame({"A": [1], "C": [5]}, dtype="int64") + + if left_empty: + left = left.head(0) + else: + right = right.head(0) + + result = left.merge(right, how=how) + + if exp == "left": + expected = DataFrame({"A": [2, 1], "B": [3, 4], "C": [np.nan, np.nan]}) + elif exp == "right": + expected = DataFrame({"A": [1], "B": [np.nan], "C": [5]}) + elif exp == "empty": + expected = DataFrame(columns=["A", "B", "C"], dtype="int64") + elif exp == "empty_cross": + expected = DataFrame(columns=["A_x", "B", "A_y", "C"], dtype="int64") + + if how == "outer": + expected = expected.sort_values("A", ignore_index=True) + + tm.assert_frame_equal(result, expected) + + +@pytest.fixture +def left(): + return DataFrame( + { + "X": Series( + np.random.default_rng(2).choice(["foo", "bar"], size=(10,)) + ).astype(CategoricalDtype(["foo", "bar"])), + "Y": np.random.default_rng(2).choice(["one", "two", "three"], size=(10,)), + } + ) + + +@pytest.fixture +def right(): + return DataFrame( + { + "X": Series(["foo", "bar"]).astype(CategoricalDtype(["foo", "bar"])), + "Z": [1, 2], + } + ) + + +class TestMergeCategorical: + def test_identical(self, left, using_infer_string): + # merging on the same, should preserve dtypes + merged = merge(left, left, on="X") + result = merged.dtypes.sort_index() + dtype = np.dtype("O") if not using_infer_string else "string" + expected = Series( + [CategoricalDtype(categories=["foo", "bar"]), dtype, dtype], + index=["X", "Y_x", "Y_y"], + ) + tm.assert_series_equal(result, expected) + + def test_basic(self, left, right, using_infer_string): + # we have matching Categorical dtypes in X + # so should preserve the merged column + merged = merge(left, right, on="X") + result = merged.dtypes.sort_index() + dtype = np.dtype("O") if not using_infer_string else "string" + expected = Series( + [ + CategoricalDtype(categories=["foo", "bar"]), + dtype, + np.dtype("int64"), + ], + index=["X", "Y", "Z"], + ) + tm.assert_series_equal(result, expected) + + def test_merge_categorical(self): + # GH 9426 + + right = DataFrame( + { + "c": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e"}, + "d": {0: "null", 1: "null", 2: "null", 3: "null", 4: "null"}, + } + ) + left = DataFrame( + { + "a": {0: "f", 1: "f", 2: "f", 3: "f", 4: "f"}, + "b": {0: "g", 1: "g", 2: "g", 3: "g", 4: "g"}, + } + ) + df = merge(left, right, how="left", left_on="b", right_on="c") + + # object-object + expected = df.copy() + + # object-cat + # note that we propagate the category + # because we don't have any matching rows + cright = right.copy() + cright["d"] = cright["d"].astype("category") + result = merge(left, cright, how="left", 
left_on="b", right_on="c") + expected["d"] = expected["d"].astype(CategoricalDtype(["null"])) + tm.assert_frame_equal(result, expected) + + # cat-object + cleft = left.copy() + cleft["b"] = cleft["b"].astype("category") + result = merge(cleft, cright, how="left", left_on="b", right_on="c") + tm.assert_frame_equal(result, expected) + + # cat-cat + cright = right.copy() + cright["d"] = cright["d"].astype("category") + cleft = left.copy() + cleft["b"] = cleft["b"].astype("category") + result = merge(cleft, cright, how="left", left_on="b", right_on="c") + tm.assert_frame_equal(result, expected) + + def tests_merge_categorical_unordered_equal(self): + # GH-19551 + df1 = DataFrame( + { + "Foo": Categorical(["A", "B", "C"], categories=["A", "B", "C"]), + "Left": ["A0", "B0", "C0"], + } + ) + + df2 = DataFrame( + { + "Foo": Categorical(["C", "B", "A"], categories=["C", "B", "A"]), + "Right": ["C1", "B1", "A1"], + } + ) + result = merge(df1, df2, on=["Foo"]) + expected = DataFrame( + { + "Foo": Categorical(["A", "B", "C"]), + "Left": ["A0", "B0", "C0"], + "Right": ["A1", "B1", "C1"], + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_multiindex_merge_with_unordered_categoricalindex(self, ordered): + # GH 36973 + pcat = CategoricalDtype(categories=["P2", "P1"], ordered=ordered) + df1 = DataFrame( + { + "id": ["C", "C", "D"], + "p": Categorical(["P2", "P1", "P2"], dtype=pcat), + "a": [0, 1, 2], + } + ).set_index(["id", "p"]) + df2 = DataFrame( + { + "id": ["A", "C", "C"], + "p": Categorical(["P2", "P2", "P1"], dtype=pcat), + "d1": [10, 11, 12], + } + ).set_index(["id", "p"]) + result = merge(df1, df2, how="left", left_index=True, right_index=True) + expected = DataFrame( + { + "id": ["C", "C", "D"], + "p": Categorical(["P2", "P1", "P2"], dtype=pcat), + "a": [0, 1, 2], + "d1": [11.0, 12.0, np.nan], + } + ).set_index(["id", "p"]) + tm.assert_frame_equal(result, expected) + + def test_other_columns(self, left, right, using_infer_string): + # non-merge columns should preserve if possible + right = right.assign(Z=right.Z.astype("category")) + + merged = merge(left, right, on="X") + result = merged.dtypes.sort_index() + dtype = np.dtype("O") if not using_infer_string else "string" + expected = Series( + [ + CategoricalDtype(categories=["foo", "bar"]), + dtype, + CategoricalDtype(categories=[1, 2]), + ], + index=["X", "Y", "Z"], + ) + tm.assert_series_equal(result, expected) + + # categories are preserved + assert left.X.values._categories_match_up_to_permutation(merged.X.values) + assert right.Z.values._categories_match_up_to_permutation(merged.Z.values) + + @pytest.mark.parametrize( + "change", + [ + lambda x: x, + lambda x: x.astype(CategoricalDtype(["foo", "bar", "bah"])), + lambda x: x.astype(CategoricalDtype(ordered=True)), + ], + ) + def test_dtype_on_merged_different( + self, change, join_type, left, right, using_infer_string + ): + # our merging columns, X now has 2 different dtypes + # so we must be object as a result + + X = change(right.X.astype("object")) + right = right.assign(X=X) + assert isinstance(left.X.values.dtype, CategoricalDtype) + # assert not left.X.values._categories_match_up_to_permutation(right.X.values) + + merged = merge(left, right, on="X", how=join_type) + + result = merged.dtypes.sort_index() + dtype = np.dtype("O") if not using_infer_string else "string" + expected = Series([dtype, dtype, np.dtype("int64")], index=["X", "Y", "Z"]) + tm.assert_series_equal(result, expected) + + def 
test_self_join_multiple_categories(self): + # GH 16767 + # non-duplicates should work with multiple categories + m = 5 + df = DataFrame( + { + "a": ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"] * m, + "b": ["t", "w", "x", "y", "z"] * 2 * m, + "c": [ + letter + for each in ["m", "n", "u", "p", "o"] + for letter in [each] * 2 * m + ], + "d": [ + letter + for each in [ + "aa", + "bb", + "cc", + "dd", + "ee", + "ff", + "gg", + "hh", + "ii", + "jj", + ] + for letter in [each] * m + ], + } + ) + + # change them all to categorical variables + df = df.apply(lambda x: x.astype("category")) + + # self-join should equal ourselves + result = merge(df, df, on=list(df.columns)) + + tm.assert_frame_equal(result, df) + + def test_dtype_on_categorical_dates(self): + # GH 16900 + # dates should not be coerced to ints + + df = DataFrame( + [[date(2001, 1, 1), 1.1], [date(2001, 1, 2), 1.3]], columns=["date", "num2"] + ) + df["date"] = df["date"].astype("category") + + df2 = DataFrame( + [[date(2001, 1, 1), 1.3], [date(2001, 1, 3), 1.4]], columns=["date", "num4"] + ) + df2["date"] = df2["date"].astype("category") + + expected_outer = DataFrame( + [ + [pd.Timestamp("2001-01-01").date(), 1.1, 1.3], + [pd.Timestamp("2001-01-02").date(), 1.3, np.nan], + [pd.Timestamp("2001-01-03").date(), np.nan, 1.4], + ], + columns=["date", "num2", "num4"], + ) + result_outer = merge(df, df2, how="outer", on=["date"]) + tm.assert_frame_equal(result_outer, expected_outer) + + expected_inner = DataFrame( + [[pd.Timestamp("2001-01-01").date(), 1.1, 1.3]], + columns=["date", "num2", "num4"], + ) + result_inner = merge(df, df2, how="inner", on=["date"]) + tm.assert_frame_equal(result_inner, expected_inner) + + @pytest.mark.parametrize("ordered", [True, False]) + @pytest.mark.parametrize( + "category_column,categories,expected_categories", + [ + ([False, True, True, False], [True, False], [True, False]), + ([2, 1, 1, 2], [1, 2], [1, 2]), + (["False", "True", "True", "False"], ["True", "False"], ["True", "False"]), + ], + ) + def test_merging_with_bool_or_int_cateorical_column( + self, category_column, categories, expected_categories, ordered + ): + # GH 17187 + # merging with a boolean/int categorical column + df1 = DataFrame({"id": [1, 2, 3, 4], "cat": category_column}) + df1["cat"] = df1["cat"].astype(CategoricalDtype(categories, ordered=ordered)) + df2 = DataFrame({"id": [2, 4], "num": [1, 9]}) + result = df1.merge(df2) + expected = DataFrame({"id": [2, 4], "cat": expected_categories, "num": [1, 9]}) + expected["cat"] = expected["cat"].astype( + CategoricalDtype(categories, ordered=ordered) + ) + tm.assert_frame_equal(expected, result) + + def test_merge_on_int_array(self): + # GH 23020 + df = DataFrame({"A": Series([1, 2, np.nan], dtype="Int64"), "B": 1}) + result = merge(df, df, on="A") + expected = DataFrame( + {"A": Series([1, 2, np.nan], dtype="Int64"), "B_x": 1, "B_y": 1} + ) + tm.assert_frame_equal(result, expected) + + +@pytest.fixture +def left_df(): + return DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0]) + + +@pytest.fixture +def right_df(): + return DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2]) + + +class TestMergeOnIndexes: + @pytest.mark.parametrize( + "how, sort, expected", + [ + ("inner", False, DataFrame({"a": [20, 10], "b": [200, 100]}, index=[2, 1])), + ("inner", True, DataFrame({"a": [10, 20], "b": [100, 200]}, index=[1, 2])), + ( + "left", + False, + DataFrame({"a": [20, 10, 0], "b": [200, 100, np.nan]}, index=[2, 1, 0]), + ), + ( + "left", + True, + DataFrame({"a": [0, 10, 20], "b": [np.nan, 100, 
200]}, index=[0, 1, 2]), + ), + ( + "right", + False, + DataFrame( + {"a": [np.nan, 10, 20], "b": [300, 100, 200]}, index=[3, 1, 2] + ), + ), + ( + "right", + True, + DataFrame( + {"a": [10, 20, np.nan], "b": [100, 200, 300]}, index=[1, 2, 3] + ), + ), + ( + "outer", + False, + DataFrame( + {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]}, + index=[0, 1, 2, 3], + ), + ), + ( + "outer", + True, + DataFrame( + {"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]}, + index=[0, 1, 2, 3], + ), + ), + ], + ) + def test_merge_on_indexes(self, left_df, right_df, how, sort, expected): + result = merge( + left_df, right_df, left_index=True, right_index=True, how=how, sort=sort + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "index", + [Index([1, 2], dtype=dtyp, name="index_col") for dtyp in tm.ALL_REAL_NUMPY_DTYPES] + + [ + CategoricalIndex(["A", "B"], categories=["A", "B"], name="index_col"), + RangeIndex(start=0, stop=2, name="index_col"), + DatetimeIndex(["2018-01-01", "2018-01-02"], name="index_col"), + ], + ids=lambda x: f"{type(x).__name__}[{x.dtype}]", +) +def test_merge_index_types(index): + # gh-20777 + # assert key access is consistent across index types + left = DataFrame({"left_data": [1, 2]}, index=index) + right = DataFrame({"right_data": [1.0, 2.0]}, index=index) + + result = left.merge(right, on=["index_col"]) + + expected = DataFrame({"left_data": [1, 2], "right_data": [1.0, 2.0]}, index=index) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "on,left_on,right_on,left_index,right_index,nm", + [ + (["outer", "inner"], None, None, False, False, "B"), + (None, None, None, True, True, "B"), + (None, ["outer", "inner"], None, False, True, "B"), + (None, None, ["outer", "inner"], True, False, "B"), + (["outer", "inner"], None, None, False, False, None), + (None, None, None, True, True, None), + (None, ["outer", "inner"], None, False, True, None), + (None, None, ["outer", "inner"], True, False, None), + ], +) +def test_merge_series(on, left_on, right_on, left_index, right_index, nm): + # GH 21220 + a = DataFrame( + {"A": [1, 2, 3, 4]}, + index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]), + ) + b = Series( + [1, 2, 3, 4], + index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]), + name=nm, + ) + expected = DataFrame( + {"A": [2, 4], "B": [1, 3]}, + index=MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]), + ) + if nm is not None: + result = merge( + a, + b, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + ) + tm.assert_frame_equal(result, expected) + else: + msg = "Cannot merge a Series without a name" + with pytest.raises(ValueError, match=msg): + result = merge( + a, + b, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + ) + + +def test_merge_series_multilevel(): + # GH#47946 + # GH 40993: For raising, enforced in 2.0 + a = DataFrame( + {"A": [1, 2, 3, 4]}, + index=MultiIndex.from_product([["a", "b"], [0, 1]], names=["outer", "inner"]), + ) + b = Series( + [1, 2, 3, 4], + index=MultiIndex.from_product([["a", "b"], [1, 2]], names=["outer", "inner"]), + name=("B", "C"), + ) + with pytest.raises( + MergeError, match="Not allowed to merge between different levels" + ): + merge(a, b, on=["outer", "inner"]) + + +@pytest.mark.parametrize( + "col1, col2, kwargs, expected_cols", + [ + (0, 0, {"suffixes": ("", "_dup")}, ["0", "0_dup"]), + (0, 0, {"suffixes": 
(None, "_dup")}, [0, "0_dup"]), + (0, 0, {"suffixes": ("_x", "_y")}, ["0_x", "0_y"]), + (0, 0, {"suffixes": ["_x", "_y"]}, ["0_x", "0_y"]), + ("a", 0, {"suffixes": (None, "_y")}, ["a", 0]), + (0.0, 0.0, {"suffixes": ("_x", None)}, ["0.0_x", 0.0]), + ("b", "b", {"suffixes": (None, "_y")}, ["b", "b_y"]), + ("a", "a", {"suffixes": ("_x", None)}, ["a_x", "a"]), + ("a", "b", {"suffixes": ("_x", None)}, ["a", "b"]), + ("a", "a", {"suffixes": (None, "_x")}, ["a", "a_x"]), + (0, 0, {"suffixes": ("_a", None)}, ["0_a", 0]), + ("a", "a", {}, ["a_x", "a_y"]), + (0, 0, {}, ["0_x", "0_y"]), + ], +) +def test_merge_suffix(col1, col2, kwargs, expected_cols): + # issue: 24782 + a = DataFrame({col1: [1, 2, 3]}) + b = DataFrame({col2: [4, 5, 6]}) + + expected = DataFrame([[1, 4], [2, 5], [3, 6]], columns=expected_cols) + + result = a.merge(b, left_index=True, right_index=True, **kwargs) + tm.assert_frame_equal(result, expected) + + result = merge(a, b, left_index=True, right_index=True, **kwargs) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "how,expected", + [ + ( + "right", + DataFrame( + {"A": [100, 200, 300], "B1": [60, 70, np.nan], "B2": [600, 700, 800]} + ), + ), + ( + "outer", + DataFrame( + { + "A": [1, 100, 200, 300], + "B1": [80, 60, 70, np.nan], + "B2": [np.nan, 600, 700, 800], + } + ), + ), + ], +) +def test_merge_duplicate_suffix(how, expected): + left_df = DataFrame({"A": [100, 200, 1], "B": [60, 70, 80]}) + right_df = DataFrame({"A": [100, 200, 300], "B": [600, 700, 800]}) + result = merge(left_df, right_df, on="A", how=how, suffixes=("_x", "_x")) + expected.columns = ["A", "B_x", "B_x"] + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "col1, col2, suffixes", + [("a", "a", (None, None)), ("a", "a", ("", None)), (0, 0, (None, ""))], +) +def test_merge_suffix_error(col1, col2, suffixes): + # issue: 24782 + a = DataFrame({col1: [1, 2, 3]}) + b = DataFrame({col2: [3, 4, 5]}) + + # TODO: might reconsider current raise behaviour, see issue 24782 + msg = "columns overlap but no suffix specified" + with pytest.raises(ValueError, match=msg): + merge(a, b, left_index=True, right_index=True, suffixes=suffixes) + + +@pytest.mark.parametrize("suffixes", [{"left", "right"}, {"left": 0, "right": 0}]) +def test_merge_suffix_raises(suffixes): + a = DataFrame({"a": [1, 2, 3]}) + b = DataFrame({"b": [3, 4, 5]}) + + with pytest.raises(TypeError, match="Passing 'suffixes' as a"): + merge(a, b, left_index=True, right_index=True, suffixes=suffixes) + + +@pytest.mark.parametrize( + "col1, col2, suffixes, msg", + [ + ("a", "a", ("a", "b", "c"), r"too many values to unpack \(expected 2\)"), + ("a", "a", tuple("a"), r"not enough values to unpack \(expected 2, got 1\)"), + ], +) +def test_merge_suffix_length_error(col1, col2, suffixes, msg): + a = DataFrame({col1: [1, 2, 3]}) + b = DataFrame({col2: [3, 4, 5]}) + + with pytest.raises(ValueError, match=msg): + merge(a, b, left_index=True, right_index=True, suffixes=suffixes) + + +@pytest.mark.parametrize("cat_dtype", ["one", "two"]) +@pytest.mark.parametrize("reverse", [True, False]) +def test_merge_equal_cat_dtypes(cat_dtype, reverse): + # see gh-22501 + cat_dtypes = { + "one": CategoricalDtype(categories=["a", "b", "c"], ordered=False), + "two": CategoricalDtype(categories=["a", "b", "c"], ordered=False), + } + + df1 = DataFrame( + {"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]), "left": [1, 2, 3]} + ).set_index("foo") + + data_foo = ["a", "b", "c"] + data_right = [1, 2, 3] + + if reverse: + 
data_foo.reverse() + data_right.reverse() + + df2 = DataFrame( + {"foo": Series(data_foo).astype(cat_dtypes[cat_dtype]), "right": data_right} + ).set_index("foo") + + result = df1.merge(df2, left_index=True, right_index=True) + + expected = DataFrame( + { + "left": [1, 2, 3], + "right": [1, 2, 3], + "foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]), + } + ).set_index("foo") + + tm.assert_frame_equal(result, expected) + + +def test_merge_equal_cat_dtypes2(): + # see gh-22501 + cat_dtype = CategoricalDtype(categories=["a", "b", "c"], ordered=False) + + # Test Data + df1 = DataFrame( + {"foo": Series(["a", "b"]).astype(cat_dtype), "left": [1, 2]} + ).set_index("foo") + + df2 = DataFrame( + {"foo": Series(["a", "b", "c"]).astype(cat_dtype), "right": [3, 2, 1]} + ).set_index("foo") + + result = df1.merge(df2, left_index=True, right_index=True) + + expected = DataFrame( + {"left": [1, 2], "right": [3, 2], "foo": Series(["a", "b"]).astype(cat_dtype)} + ).set_index("foo") + + tm.assert_frame_equal(result, expected) + + +def test_merge_on_cat_and_ext_array(): + # GH 28668 + right = DataFrame( + {"a": Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")} + ) + left = right.copy() + left["a"] = left["a"].astype("category") + + result = merge(left, right, how="inner", on="a") + expected = right.copy() + + tm.assert_frame_equal(result, expected) + + +def test_merge_multiindex_columns(): + # Issue #28518 + # Verify that merging two dataframes gives the expected labels + # The original cause of this issue comes from a bug in lexsort_depth and is + # tested in test_lexsort_depth + + letters = ["a", "b", "c", "d"] + numbers = ["1", "2", "3"] + index = MultiIndex.from_product((letters, numbers), names=["outer", "inner"]) + + frame_x = DataFrame(columns=index) + frame_x["id"] = "" + frame_y = DataFrame(columns=index) + frame_y["id"] = "" + + l_suf = "_x" + r_suf = "_y" + result = frame_x.merge(frame_y, on="id", suffixes=(l_suf, r_suf)) + + # Constructing the expected results + tuples = [(letter + l_suf, num) for letter in letters for num in numbers] + tuples += [("id", "")] + tuples += [(letter + r_suf, num) for letter in letters for num in numbers] + + expected_index = MultiIndex.from_tuples(tuples, names=["outer", "inner"]) + expected = DataFrame(columns=expected_index) + + tm.assert_frame_equal(result, expected, check_dtype=False) + + +def test_merge_datetime_upcast_dtype(): + # https://github.com/pandas-dev/pandas/issues/31208 + df1 = DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]}) + df2 = DataFrame( + {"y": ["1", "2", "3"], "z": pd.to_datetime(["2000", "2001", "2002"])} + ) + result = merge(df1, df2, how="left", on="y") + expected = DataFrame( + { + "x": ["a", "b", "c"], + "y": ["1", "2", "4"], + "z": pd.to_datetime(["2000", "2001", "NaT"]), + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("n_categories", [5, 128]) +def test_categorical_non_unique_monotonic(n_categories): + # GH 28189 + # With n_categories as 5, we test that the int8 case is hit in libjoin, + # with n_categories as 128 we test the int16 case. 
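+ # Editorial note: categorical codes are stored in the narrowest integer type + # that can hold the number of categories, so 5 categories keep int8 codes + # while 128 categories no longer fit the int8 path and exercise the int16 + # join routines instead.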
+ left_index = CategoricalIndex([0] + list(range(n_categories))) + df1 = DataFrame(range(n_categories + 1), columns=["value"], index=left_index) + df2 = DataFrame( + [[6]], + columns=["value"], + index=CategoricalIndex([0], categories=list(range(n_categories))), + ) + + result = merge(df1, df2, how="left", left_index=True, right_index=True) + expected = DataFrame( + [[i, 6.0] if i < 2 else [i, np.nan] for i in range(n_categories + 1)], + columns=["value_x", "value_y"], + index=left_index, + ) + tm.assert_frame_equal(expected, result) + + +def test_merge_join_categorical_multiindex(): + # From issue 16627 + a = { + "Cat1": Categorical(["a", "b", "a", "c", "a", "b"], ["a", "b", "c"]), + "Int1": [0, 1, 0, 1, 0, 0], + } + a = DataFrame(a) + + b = { + "Cat": Categorical(["a", "b", "c", "a", "b", "c"], ["a", "b", "c"]), + "Int": [0, 0, 0, 1, 1, 1], + "Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6], + } + b = DataFrame(b).set_index(["Cat", "Int"])["Factor"] + + expected = merge( + a, + b.reset_index(), + left_on=["Cat1", "Int1"], + right_on=["Cat", "Int"], + how="left", + ) + expected = expected.drop(["Cat", "Int"], axis=1) + result = a.join(b, on=["Cat1", "Int1"]) + tm.assert_frame_equal(expected, result) + + # Same test, but with ordered categorical + a = { + "Cat1": Categorical( + ["a", "b", "a", "c", "a", "b"], ["b", "a", "c"], ordered=True + ), + "Int1": [0, 1, 0, 1, 0, 0], + } + a = DataFrame(a) + + b = { + "Cat": Categorical( + ["a", "b", "c", "a", "b", "c"], ["b", "a", "c"], ordered=True + ), + "Int": [0, 0, 0, 1, 1, 1], + "Factor": [1.1, 1.2, 1.3, 1.4, 1.5, 1.6], + } + b = DataFrame(b).set_index(["Cat", "Int"])["Factor"] + + expected = merge( + a, + b.reset_index(), + left_on=["Cat1", "Int1"], + right_on=["Cat", "Int"], + how="left", + ) + expected = expected.drop(["Cat", "Int"], axis=1) + result = a.join(b, on=["Cat1", "Int1"]) + tm.assert_frame_equal(expected, result) + + +@pytest.mark.parametrize("func", ["merge", "merge_asof"]) +@pytest.mark.parametrize( + ("kwargs", "err_msg"), + [ + ({"left_on": "a", "left_index": True}, ["left_on", "left_index"]), + ({"right_on": "a", "right_index": True}, ["right_on", "right_index"]), + ], +) +def test_merge_join_cols_error_reporting_duplicates(func, kwargs, err_msg): + # GH: 16228 + left = DataFrame({"a": [1, 2], "b": [3, 4]}) + right = DataFrame({"a": [1, 1], "c": [5, 6]}) + msg = rf'Can only pass argument "{err_msg[0]}" OR "{err_msg[1]}" not both\.' + with pytest.raises(MergeError, match=msg): + getattr(pd, func)(left, right, **kwargs) + + +@pytest.mark.parametrize("func", ["merge", "merge_asof"]) +@pytest.mark.parametrize( + ("kwargs", "err_msg"), + [ + ({"left_on": "a"}, ["right_on", "right_index"]), + ({"right_on": "a"}, ["left_on", "left_index"]), + ], +) +def test_merge_join_cols_error_reporting_missing(func, kwargs, err_msg): + # GH: 16228 + left = DataFrame({"a": [1, 2], "b": [3, 4]}) + right = DataFrame({"a": [1, 1], "c": [5, 6]}) + msg = rf'Must pass "{err_msg[0]}" OR "{err_msg[1]}"\.' + with pytest.raises(MergeError, match=msg): + getattr(pd, func)(left, right, **kwargs) + + +@pytest.mark.parametrize("func", ["merge", "merge_asof"]) +@pytest.mark.parametrize( + "kwargs", + [ + {"right_index": True}, + {"left_index": True}, + ], +) +def test_merge_join_cols_error_reporting_on_and_index(func, kwargs): + # GH: 16228 + left = DataFrame({"a": [1, 2], "b": [3, 4]}) + right = DataFrame({"a": [1, 1], "c": [5, 6]}) + msg = ( + r'Can only pass argument "on" OR "left_index" ' + r'and "right_index", not a combination of both\.' 
+ ) + with pytest.raises(MergeError, match=msg): + getattr(pd, func)(left, right, on="a", **kwargs) + + +def test_merge_right_left_index(): + # GH#38616 + left = DataFrame({"x": [1, 1], "z": ["foo", "foo"]}) + right = DataFrame({"x": [1, 1], "z": ["foo", "foo"]}) + result = merge(left, right, how="right", left_index=True, right_on="x") + expected = DataFrame( + { + "x": [1, 1], + "x_x": [1, 1], + "z_x": ["foo", "foo"], + "x_y": [1, 1], + "z_y": ["foo", "foo"], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_result_empty_index_and_on(): + # GH#33814 + df1 = DataFrame({"a": [1], "b": [2]}).set_index(["a", "b"]) + df2 = DataFrame({"b": [1]}).set_index(["b"]) + expected = DataFrame({"a": [], "b": []}, dtype=np.int64).set_index(["a", "b"]) + result = merge(df1, df2, left_on=["b"], right_index=True) + tm.assert_frame_equal(result, expected) + + result = merge(df2, df1, left_index=True, right_on=["b"]) + tm.assert_frame_equal(result, expected) + + +def test_merge_suffixes_produce_dup_columns_raises(): + # GH#22818; Enforced in 2.0 + left = DataFrame({"a": [1, 2, 3], "b": 1, "b_x": 2}) + right = DataFrame({"a": [1, 2, 3], "b": 2}) + + with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"): + merge(left, right, on="a") + + with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"): + merge(right, left, on="a", suffixes=("_y", "_x")) + + +def test_merge_duplicate_columns_with_suffix_no_warning(): + # GH#22818 + # Do not raise warning when duplicates are caused by duplicates in origin + left = DataFrame([[1, 1, 1], [2, 2, 2]], columns=["a", "b", "b"]) + right = DataFrame({"a": [1, 3], "b": 2}) + result = merge(left, right, on="a") + expected = DataFrame([[1, 1, 1, 2]], columns=["a", "b_x", "b_x", "b_y"]) + tm.assert_frame_equal(result, expected) + + +def test_merge_duplicate_columns_with_suffix_causing_another_duplicate_raises(): + # GH#22818, Enforced in 2.0 + # This should raise warning because suffixes cause another collision + left = DataFrame([[1, 1, 1, 1], [2, 2, 2, 2]], columns=["a", "b", "b", "b_x"]) + right = DataFrame({"a": [1, 3], "b": 2}) + with pytest.raises(MergeError, match="Passing 'suffixes' which cause duplicate"): + merge(left, right, on="a") + + +def test_merge_string_float_column_result(): + # GH 13353 + df1 = DataFrame([[1, 2], [3, 4]], columns=Index(["a", 114.0])) + df2 = DataFrame([[9, 10], [11, 12]], columns=["x", "y"]) + result = merge(df2, df1, how="inner", left_index=True, right_index=True) + expected = DataFrame( + [[9, 10, 1, 2], [11, 12, 3, 4]], columns=Index(["x", "y", "a", 114.0]) + ) + tm.assert_frame_equal(result, expected) + + +def test_mergeerror_on_left_index_mismatched_dtypes(): + # GH 22449 + df_1 = DataFrame(data=["X"], columns=["C"], index=[22]) + df_2 = DataFrame(data=["X"], columns=["C"], index=[999]) + with pytest.raises(MergeError, match="Can only pass argument"): + merge(df_1, df_2, on=["C"], left_index=True) + + +def test_merge_on_left_categoricalindex(): + # GH#48464 don't raise when left_on is a CategoricalIndex + ci = CategoricalIndex(range(3)) + + right = DataFrame({"A": ci, "B": range(3)}) + left = DataFrame({"C": range(3, 6)}) + + res = merge(left, right, left_on=ci, right_on="A") + expected = merge(left, right, left_on=ci._data, right_on="A") + tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize("dtype", [None, "Int64"]) +def test_merge_outer_with_NaN(dtype): + # GH#43550 + left = DataFrame({"key": [1, 2], "col1": [1, 2]}, dtype=dtype) + right = DataFrame({"key": 
[np.nan, np.nan], "col2": [3, 4]}, dtype=dtype) + result = merge(left, right, on="key", how="outer") + expected = DataFrame( + { + "key": [1, 2, np.nan, np.nan], + "col1": [1, 2, np.nan, np.nan], + "col2": [np.nan, np.nan, 3, 4], + }, + dtype=dtype, + ) + tm.assert_frame_equal(result, expected) + + # switch left and right + result = merge(right, left, on="key", how="outer") + expected = DataFrame( + { + "key": [1, 2, np.nan, np.nan], + "col2": [np.nan, np.nan, 3, 4], + "col1": [1, 2, np.nan, np.nan], + }, + dtype=dtype, + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_different_index_names(): + # GH#45094 + left = DataFrame({"a": [1]}, index=Index([1], name="c")) + right = DataFrame({"a": [1]}, index=Index([1], name="d")) + result = merge(left, right, left_on="c", right_on="d") + expected = DataFrame({"a_x": [1], "a_y": 1}) + tm.assert_frame_equal(result, expected) + + +def test_merge_ea(any_numeric_ea_dtype, join_type): + # GH#44240 + left = DataFrame({"a": [1, 2, 3], "b": 1}, dtype=any_numeric_ea_dtype) + right = DataFrame({"a": [1, 2, 3], "c": 2}, dtype=any_numeric_ea_dtype) + result = left.merge(right, how=join_type) + expected = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2}, dtype=any_numeric_ea_dtype) + tm.assert_frame_equal(result, expected) + + +def test_merge_ea_and_non_ea(any_numeric_ea_dtype, join_type): + # GH#44240 + left = DataFrame({"a": [1, 2, 3], "b": 1}, dtype=any_numeric_ea_dtype) + right = DataFrame({"a": [1, 2, 3], "c": 2}, dtype=any_numeric_ea_dtype.lower()) + result = left.merge(right, how=join_type) + expected = DataFrame( + { + "a": Series([1, 2, 3], dtype=any_numeric_ea_dtype), + "b": Series([1, 1, 1], dtype=any_numeric_ea_dtype), + "c": Series([2, 2, 2], dtype=any_numeric_ea_dtype.lower()), + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["int64", "int64[pyarrow]"]) +def test_merge_arrow_and_numpy_dtypes(dtype): + # GH#52406 + pytest.importorskip("pyarrow") + df = DataFrame({"a": [1, 2]}, dtype=dtype) + df2 = DataFrame({"a": [1, 2]}, dtype="int64[pyarrow]") + result = df.merge(df2) + expected = df.copy() + tm.assert_frame_equal(result, expected) + + result = df2.merge(df) + expected = df2.copy() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("how", ["inner", "left", "outer", "right"]) +@pytest.mark.parametrize("tz", [None, "America/Chicago"]) +def test_merge_datetime_different_resolution(tz, how): + # https://github.com/pandas-dev/pandas/issues/53200 + vals = [ + pd.Timestamp(2023, 5, 12, tz=tz), + pd.Timestamp(2023, 5, 13, tz=tz), + pd.Timestamp(2023, 5, 14, tz=tz), + ] + df1 = DataFrame({"t": vals[:2], "a": [1.0, 2.0]}) + df1["t"] = df1["t"].dt.as_unit("ns") + df2 = DataFrame({"t": vals[1:], "b": [1.0, 2.0]}) + df2["t"] = df2["t"].dt.as_unit("s") + + expected = DataFrame({"t": vals, "a": [1.0, 2.0, np.nan], "b": [np.nan, 1.0, 2.0]}) + expected["t"] = expected["t"].dt.as_unit("ns") + if how == "inner": + expected = expected.iloc[[1]].reset_index(drop=True) + elif how == "left": + expected = expected.iloc[[0, 1]] + elif how == "right": + expected = expected.iloc[[1, 2]].reset_index(drop=True) + + result = df1.merge(df2, on="t", how=how) + tm.assert_frame_equal(result, expected) + + +def test_merge_multiindex_single_level(): + # GH52331 + df = DataFrame({"col": ["A", "B"]}) + df2 = DataFrame( + data={"b": [100]}, + index=MultiIndex.from_tuples([("A",), ("C",)], names=["col"]), + ) + expected = DataFrame({"col": ["A", "B"], "b": [100, np.nan]}) + + result = df.merge(df2, 
left_on=["col"], right_index=True, how="left") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("how", ["left", "right", "inner", "outer"]) +@pytest.mark.parametrize("sort", [True, False]) +@pytest.mark.parametrize("on_index", [True, False]) +@pytest.mark.parametrize("left_unique", [True, False]) +@pytest.mark.parametrize("left_monotonic", [True, False]) +@pytest.mark.parametrize("right_unique", [True, False]) +@pytest.mark.parametrize("right_monotonic", [True, False]) +def test_merge_combinations( + how, sort, on_index, left_unique, left_monotonic, right_unique, right_monotonic +): + # GH 54611 + left = [2, 3] + if left_unique: + left.append(4 if left_monotonic else 1) + else: + left.append(3 if left_monotonic else 2) + + right = [2, 3] + if right_unique: + right.append(4 if right_monotonic else 1) + else: + right.append(3 if right_monotonic else 2) + + left = DataFrame({"key": left}) + right = DataFrame({"key": right}) + + if on_index: + left = left.set_index("key") + right = right.set_index("key") + on_kwargs = {"left_index": True, "right_index": True} + else: + on_kwargs = {"on": "key"} + + result = merge(left, right, how=how, sort=sort, **on_kwargs) + + if on_index: + left = left.reset_index() + right = right.reset_index() + + if how in ["left", "right", "inner"]: + if how in ["left", "inner"]: + expected, other, other_unique = left, right, right_unique + else: + expected, other, other_unique = right, left, left_unique + if how == "inner": + keep_values = set(left["key"].values).intersection(right["key"].values) + keep_mask = expected["key"].isin(keep_values) + expected = expected[keep_mask] + if sort: + expected = expected.sort_values("key") + if not other_unique: + other_value_counts = other["key"].value_counts() + repeats = other_value_counts.reindex(expected["key"].values, fill_value=1) + repeats = repeats.astype(np.intp) + expected = expected["key"].repeat(repeats.values) + expected = expected.to_frame() + elif how == "outer": + left_counts = left["key"].value_counts() + right_counts = right["key"].value_counts() + expected_counts = left_counts.mul(right_counts, fill_value=1) + expected_counts = expected_counts.astype(np.intp) + expected = expected_counts.index.values.repeat(expected_counts.values) + expected = DataFrame({"key": expected}) + expected = expected.sort_values("key") + + if on_index: + expected = expected.set_index("key") + else: + expected = expected.reset_index(drop=True) + + tm.assert_frame_equal(result, expected) + + +def test_merge_ea_int_and_float_numpy(): + # GH#46178 + df1 = DataFrame([1.0, np.nan], dtype=pd.Int64Dtype()) + df2 = DataFrame([1.5]) + expected = DataFrame(columns=[0], dtype="Int64") + + with tm.assert_produces_warning(UserWarning, match="You are merging"): + result = df1.merge(df2) + tm.assert_frame_equal(result, expected) + + with tm.assert_produces_warning(UserWarning, match="You are merging"): + result = df2.merge(df1) + tm.assert_frame_equal(result, expected.astype("float64")) + + df2 = DataFrame([1.0]) + expected = DataFrame([1], columns=[0], dtype="Int64") + result = df1.merge(df2) + tm.assert_frame_equal(result, expected) + + result = df2.merge(df1) + tm.assert_frame_equal(result, expected.astype("float64")) + + +def test_merge_arrow_string_index(any_string_dtype): + # GH#54894 + pytest.importorskip("pyarrow") + left = DataFrame({"a": ["a", "b"]}, dtype=any_string_dtype) + right = DataFrame({"b": 1}, index=Index(["a", "c"], dtype=any_string_dtype)) + result = left.merge(right, left_on="a", right_index=True, 
how="left") + expected = DataFrame( + {"a": Series(["a", "b"], dtype=any_string_dtype), "b": [1, np.nan]} + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("left_empty", [True, False]) +@pytest.mark.parametrize("right_empty", [True, False]) +def test_merge_empty_frames_column_order(left_empty, right_empty): + # GH 51929 + df1 = DataFrame(1, index=[0], columns=["A", "B"]) + df2 = DataFrame(1, index=[0], columns=["A", "C", "D"]) + + if left_empty: + df1 = df1.iloc[:0] + if right_empty: + df2 = df2.iloc[:0] + + result = merge(df1, df2, on=["A"], how="outer") + expected = DataFrame(1, index=[0], columns=["A", "B", "C", "D"]) + if left_empty and right_empty: + expected = expected.iloc[:0] + elif left_empty: + expected["B"] = np.nan + elif right_empty: + expected[["C", "D"]] = np.nan + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("how", ["left", "right", "inner", "outer"]) +def test_merge_datetime_and_timedelta(how): + left = DataFrame({"key": Series([1, None], dtype="datetime64[ns]")}) + right = DataFrame({"key": Series([1], dtype="timedelta64[ns]")}) + + msg = ( + f"You are trying to merge on {left['key'].dtype} and {right['key'].dtype} " + "columns for key 'key'. If you wish to proceed you should use pd.concat" + ) + with pytest.raises(ValueError, match=re.escape(msg)): + left.merge(right, on="key", how=how) + + msg = ( + f"You are trying to merge on {right['key'].dtype} and {left['key'].dtype} " + "columns for key 'key'. If you wish to proceed you should use pd.concat" + ) + with pytest.raises(ValueError, match=re.escape(msg)): + right.merge(left, on="key", how=how) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_asof.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_asof.py new file mode 100644 index 0000000000000000000000000000000000000000..a2e22ea73fd86ef1795298d99404bfdb04be81f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_asof.py @@ -0,0 +1,3657 @@ +import datetime + +import numpy as np +import pytest +import pytz + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Index, + Timedelta, + merge_asof, + option_context, + to_datetime, +) +import pandas._testing as tm +from pandas.core.reshape.merge import MergeError + + +@pytest.fixture(params=["s", "ms", "us", "ns"]) +def unit(request): + """ + Resolution for datetimelike dtypes. 
+ """ + return request.param + + +class TestAsOfMerge: + def prep_data(self, df, dedupe=False): + if dedupe: + df = df.drop_duplicates(["time", "ticker"], keep="last").reset_index( + drop=True + ) + df.time = to_datetime(df.time) + return df + + @pytest.fixture + def trades(self): + df = pd.DataFrame( + [ + ["20160525 13:30:00.023", "MSFT", "51.9500", "75", "NASDAQ"], + ["20160525 13:30:00.038", "MSFT", "51.9500", "155", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.7700", "100", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9200", "100", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "200", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "300", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "600", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "44", "NASDAQ"], + ["20160525 13:30:00.074", "AAPL", "98.6700", "478343", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6700", "478343", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6600", "6", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "30", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "75", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "20", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "35", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "10", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"], + ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "1000", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "300", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "400", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "600", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "783", "NASDAQ"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"], + ], + columns="time,ticker,price,quantity,marketCenter".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + return self.prep_data(df) + + @pytest.fixture + def quotes(self): + df = pd.DataFrame( + [ + ["20160525 13:30:00.023", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.023", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.041", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.072", "GOOG", "720.50", "720.88"], + ["20160525 13:30:00.075", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.92", "51.95"], + ], + columns="time,ticker,bid,ask".split(","), + ) + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df, dedupe=True) + + @pytest.fixture + def asof(self): + df = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + "51.95", + "51.95", + ], + [ + 
"20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df) + + @pytest.fixture + def tolerance(self): + df = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + 
"20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df) + + @pytest.fixture + def allow_exact_matches(self, datapath): + df = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + np.nan, + np.nan, + 
], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + np.nan, + np.nan, + ], + ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan], + ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.95", + "51.95", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df) + + @pytest.fixture + def allow_exact_matches_and_tolerance(self): + df = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + np.nan, + np.nan, + ], + ["20160525 13:30:00.075", "AAPL", "98.55", "6", 
"ARCA", np.nan, np.nan], + ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.95", + "51.95", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + df["price"] = df["price"].astype("float64") + df["quantity"] = df["quantity"].astype("int64") + df["bid"] = df["bid"].astype("float64") + df["ask"] = df["ask"].astype("float64") + return self.prep_data(df) + + def test_examples1(self): + """doc-string examples""" + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]} + ) + + result = merge_asof(left, right, on="a") + tm.assert_frame_equal(result, expected) + + def test_examples2(self, unit): + """doc-string examples""" + if unit == "s": + pytest.skip( + "This test is invalid for unit='s' because that would " + "round the trades['time']]" + ) + trades = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.038", + "20160525 13:30:00.048", + "20160525 13:30:00.048", + "20160525 13:30:00.048", + ] + ).astype(f"M8[{unit}]"), + "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + }, + columns=["time", "ticker", "price", "quantity"], + ) + + quotes = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.030", + "20160525 13:30:00.041", + "20160525 13:30:00.048", + "20160525 13:30:00.049", + "20160525 13:30:00.072", + "20160525 13:30:00.075", + ] + ).astype(f"M8[{unit}]"), + "ticker": [ + "GOOG", + "MSFT", + "MSFT", + "MSFT", + "GOOG", + "AAPL", + "GOOG", + "MSFT", + ], + "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], + "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03], + }, + columns=["time", "ticker", "bid", "ask"], + ) + + merge_asof(trades, quotes, on="time", by="ticker") + + merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("2ms")) + + expected = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.038", + "20160525 13:30:00.048", + "20160525 13:30:00.048", + "20160525 13:30:00.048", + ] + ).astype(f"M8[{unit}]"), + "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + "bid": [np.nan, 51.97, np.nan, np.nan, np.nan], + "ask": [np.nan, 51.98, np.nan, np.nan, np.nan], + }, + columns=["time", "ticker", "price", "quantity", 
"bid", "ask"], + ) + + result = merge_asof( + trades, + quotes, + on="time", + by="ticker", + tolerance=Timedelta("10ms"), + allow_exact_matches=False, + ) + tm.assert_frame_equal(result, expected) + + def test_examples3(self): + """doc-string examples""" + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]} + ) + + result = merge_asof(left, right, on="a", direction="forward") + tm.assert_frame_equal(result, expected) + + def test_examples4(self): + """doc-string examples""" + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]} + ) + + result = merge_asof(left, right, on="a", direction="nearest") + tm.assert_frame_equal(result, expected) + + def test_basic(self, trades, asof, quotes): + expected = asof + + result = merge_asof(trades, quotes, on="time", by="ticker") + tm.assert_frame_equal(result, expected) + + def test_basic_categorical(self, trades, asof, quotes): + expected = asof + trades.ticker = trades.ticker.astype("category") + quotes.ticker = quotes.ticker.astype("category") + expected.ticker = expected.ticker.astype("category") + + result = merge_asof(trades, quotes, on="time", by="ticker") + tm.assert_frame_equal(result, expected) + + def test_basic_left_index(self, trades, asof, quotes): + # GH14253 + expected = asof + trades = trades.set_index("time") + + result = merge_asof( + trades, quotes, left_index=True, right_on="time", by="ticker" + ) + # left-only index uses right"s index, oddly + expected.index = result.index + # time column appears after left"s columns + expected = expected[result.columns] + tm.assert_frame_equal(result, expected) + + def test_basic_right_index(self, trades, asof, quotes): + expected = asof + quotes = quotes.set_index("time") + + result = merge_asof( + trades, quotes, left_on="time", right_index=True, by="ticker" + ) + tm.assert_frame_equal(result, expected) + + def test_basic_left_index_right_index(self, trades, asof, quotes): + expected = asof.set_index("time") + trades = trades.set_index("time") + quotes = quotes.set_index("time") + + result = merge_asof( + trades, quotes, left_index=True, right_index=True, by="ticker" + ) + tm.assert_frame_equal(result, expected) + + def test_multi_index_left(self, trades, quotes): + # MultiIndex is prohibited + trades = trades.set_index(["time", "price"]) + quotes = quotes.set_index("time") + with pytest.raises(MergeError, match="left can only have one index"): + merge_asof(trades, quotes, left_index=True, right_index=True) + + def test_multi_index_right(self, trades, quotes): + # MultiIndex is prohibited + trades = trades.set_index("time") + quotes = quotes.set_index(["time", "bid"]) + with pytest.raises(MergeError, match="right can only have one index"): + merge_asof(trades, quotes, left_index=True, right_index=True) + + def test_on_and_index_left_on(self, trades, quotes): + # "on" parameter and index together is prohibited + trades = trades.set_index("time") + quotes = quotes.set_index("time") + msg = 'Can only pass argument "left_on" OR "left_index" not both.' 
+ with pytest.raises(MergeError, match=msg): + merge_asof( + trades, quotes, left_on="price", left_index=True, right_index=True + ) + + def test_on_and_index_right_on(self, trades, quotes): + trades = trades.set_index("time") + quotes = quotes.set_index("time") + msg = 'Can only pass argument "right_on" OR "right_index" not both.' + with pytest.raises(MergeError, match=msg): + merge_asof( + trades, quotes, right_on="bid", left_index=True, right_index=True + ) + + def test_basic_left_by_right_by(self, trades, asof, quotes): + # GH14253 + expected = asof + + result = merge_asof( + trades, quotes, on="time", left_by="ticker", right_by="ticker" + ) + tm.assert_frame_equal(result, expected) + + def test_missing_right_by(self, trades, asof, quotes): + expected = asof + + q = quotes[quotes.ticker != "MSFT"] + result = merge_asof(trades, q, on="time", by="ticker") + expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan + tm.assert_frame_equal(result, expected) + + def test_multiby(self): + # GH13936 + trades = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.046", + "20160525 13:30:00.048", + "20160525 13:30:00.050", + ] + ), + "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + }, + columns=["time", "ticker", "exch", "price", "quantity"], + ) + + quotes = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.030", + "20160525 13:30:00.041", + "20160525 13:30:00.045", + "20160525 13:30:00.049", + ] + ), + "ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"], + "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"], + "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99], + "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01], + }, + columns=["time", "ticker", "exch", "bid", "ask"], + ) + + expected = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.046", + "20160525 13:30:00.048", + "20160525 13:30:00.050", + ] + ), + "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + "bid": [np.nan, 51.95, 720.50, 720.51, np.nan], + "ask": [np.nan, 51.96, 720.93, 720.92, np.nan], + }, + columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"], + ) + + result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["object", "string"]) + def test_multiby_heterogeneous_types(self, dtype): + # GH13936 + trades = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.046", + "20160525 13:30:00.048", + "20160525 13:30:00.050", + ] + ), + "ticker": [0, 0, 1, 1, 2], + "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + }, + columns=["time", "ticker", "exch", "price", "quantity"], + ) + trades = trades.astype({"ticker": dtype, "exch": dtype}) + + quotes = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.030", + "20160525 13:30:00.041", + "20160525 13:30:00.045", + "20160525 13:30:00.049", + ] + ), 
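+ # integer ticker codes on purpose; they are cast to the parametrized object/string dtype below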
+ "ticker": [1, 0, 0, 0, 1, 2], + "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"], + "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99], + "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01], + }, + columns=["time", "ticker", "exch", "bid", "ask"], + ) + quotes = quotes.astype({"ticker": dtype, "exch": dtype}) + + expected = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.023", + "20160525 13:30:00.023", + "20160525 13:30:00.046", + "20160525 13:30:00.048", + "20160525 13:30:00.050", + ] + ), + "ticker": [0, 0, 1, 1, 2], + "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"], + "price": [51.95, 51.95, 720.77, 720.92, 98.00], + "quantity": [75, 155, 100, 100, 100], + "bid": [np.nan, 51.95, 720.50, 720.51, np.nan], + "ask": [np.nan, 51.96, 720.93, 720.92, np.nan], + }, + columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"], + ) + expected = expected.astype({"ticker": dtype, "exch": dtype}) + + result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"]) + tm.assert_frame_equal(result, expected) + + def test_mismatched_index_dtype(self): + # similar to test_multiby_indexed, but we change the dtype on left.index + left = pd.DataFrame( + [ + [to_datetime("20160602"), 1, "a"], + [to_datetime("20160602"), 2, "a"], + [to_datetime("20160603"), 1, "b"], + [to_datetime("20160603"), 2, "b"], + ], + columns=["time", "k1", "k2"], + ).set_index("time") + # different dtype for the index + left.index = left.index - pd.Timestamp(0) + + right = pd.DataFrame( + [ + [to_datetime("20160502"), 1, "a", 1.0], + [to_datetime("20160502"), 2, "a", 2.0], + [to_datetime("20160503"), 1, "b", 3.0], + [to_datetime("20160503"), 2, "b", 4.0], + ], + columns=["time", "k1", "k2", "value"], + ).set_index("time") + + msg = "incompatible merge keys" + with pytest.raises(MergeError, match=msg): + merge_asof(left, right, left_index=True, right_index=True, by=["k1", "k2"]) + + def test_multiby_indexed(self): + # GH15676 + left = pd.DataFrame( + [ + [to_datetime("20160602"), 1, "a"], + [to_datetime("20160602"), 2, "a"], + [to_datetime("20160603"), 1, "b"], + [to_datetime("20160603"), 2, "b"], + ], + columns=["time", "k1", "k2"], + ).set_index("time") + + right = pd.DataFrame( + [ + [to_datetime("20160502"), 1, "a", 1.0], + [to_datetime("20160502"), 2, "a", 2.0], + [to_datetime("20160503"), 1, "b", 3.0], + [to_datetime("20160503"), 2, "b", 4.0], + ], + columns=["time", "k1", "k2", "value"], + ).set_index("time") + + expected = pd.DataFrame( + [ + [to_datetime("20160602"), 1, "a", 1.0], + [to_datetime("20160602"), 2, "a", 2.0], + [to_datetime("20160603"), 1, "b", 3.0], + [to_datetime("20160603"), 2, "b", 4.0], + ], + columns=["time", "k1", "k2", "value"], + ).set_index("time") + + result = merge_asof( + left, right, left_index=True, right_index=True, by=["k1", "k2"] + ) + + tm.assert_frame_equal(expected, result) + + with pytest.raises( + MergeError, match="left_by and right_by must be the same length" + ): + merge_asof( + left, + right, + left_index=True, + right_index=True, + left_by=["k1", "k2"], + right_by=["k1"], + ) + + def test_basic2(self, datapath): + expected = pd.DataFrame( + [ + [ + "20160525 13:30:00.023", + "MSFT", + "51.95", + "75", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.038", + "MSFT", + "51.95", + "155", + "NASDAQ", + "51.95", + "51.95", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.77", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.92", + "100", + "NASDAQ", + "720.5", + "720.93", + ], + 
[ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "200", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "300", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "600", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.048", + "GOOG", + "720.93", + "44", + "NASDAQ", + "720.5", + "720.93", + ], + [ + "20160525 13:30:00.074", + "AAPL", + "98.67", + "478343", + "NASDAQ", + np.nan, + np.nan, + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.67", + "478343", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.66", + "6", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "30", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "75", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "20", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "35", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.65", + "10", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.075", + "AAPL", + "98.55", + "6", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "1000", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "300", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "400", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "600", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.076", + "AAPL", + "98.56", + "200", + "ARCA", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "783", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.078", + "MSFT", + "51.95", + "100", + "NASDAQ", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.084", + "AAPL", + "98.64", + "40", + "NASDAQ", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.084", + "AAPL", + "98.55", + "149", + "EDGX", + "98.55", + "98.56", + ], + [ + "20160525 13:30:00.086", + "AAPL", + "98.56", + "500", + "ARCA", + "98.55", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "647", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "300", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "50", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "50", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "70", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "70", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "1", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "62", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "10", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.104", + "AAPL", + "98.63", + "100", + "ARCA", + "98.62", + 
"98.63", + ], + [ + "20160525 13:30:00.105", + "AAPL", + "98.63", + "100", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.105", + "AAPL", + "98.63", + "700", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.106", + "AAPL", + "98.63", + "61", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.107", + "AAPL", + "98.63", + "100", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.107", + "AAPL", + "98.63", + "53", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.108", + "AAPL", + "98.63", + "100", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.108", + "AAPL", + "98.63", + "839", + "ARCA", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.115", + "AAPL", + "98.63", + "5", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.118", + "AAPL", + "98.63", + "295", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.118", + "AAPL", + "98.63", + "5", + "EDGX", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.128", + "AAPL", + "98.63", + "100", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.128", + "AAPL", + "98.63", + "100", + "NASDAQ", + "98.62", + "98.63", + ], + [ + "20160525 13:30:00.128", + "MSFT", + "51.92", + "100", + "ARCA", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "100", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "10", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "59", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "31", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "69", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "12", + "NASDAQ", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "12", + "EDGX", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "100", + "ARCA", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.129", + "AAPL", + "98.62", + "100", + "ARCA", + "98.61", + "98.63", + ], + [ + "20160525 13:30:00.130", + "MSFT", + "51.95", + "317", + "ARCA", + "51.93", + "51.95", + ], + [ + "20160525 13:30:00.130", + "MSFT", + "51.95", + "283", + "ARCA", + "51.93", + "51.95", + ], + [ + "20160525 13:30:00.135", + "MSFT", + "51.93", + "100", + "EDGX", + "51.92", + "51.95", + ], + [ + "20160525 13:30:00.135", + "AAPL", + "98.62", + "100", + "ARCA", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "12", + "NASDAQ", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "88", + "NASDAQ", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "162", + "NASDAQ", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.61", + "100", + "BATS", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "61", + "ARCA", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "25", + "ARCA", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.144", + "AAPL", + "98.62", + "14", + "ARCA", + "98.61", + "98.62", + ], + [ + "20160525 13:30:00.145", + "AAPL", + "98.62", + "12", + "ARCA", + "98.6", + "98.63", + ], + [ + "20160525 13:30:00.145", + "AAPL", + "98.62", + "100", + "ARCA", + "98.6", + "98.63", + ], + [ + "20160525 13:30:00.145", + "AAPL", + "98.63", + "100", + "NASDAQ", + "98.6", + "98.63", + ], + [ + 
"20160525 13:30:00.145", + "AAPL", + "98.63", + "100", + "NASDAQ", + "98.6", + "98.63", + ], + ], + columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","), + ) + expected["price"] = expected["price"].astype("float64") + expected["quantity"] = expected["quantity"].astype("int64") + expected["bid"] = expected["bid"].astype("float64") + expected["ask"] = expected["ask"].astype("float64") + expected = self.prep_data(expected) + + trades = pd.DataFrame( + [ + ["20160525 13:30:00.023", "MSFT", "51.9500", "75", "NASDAQ"], + ["20160525 13:30:00.038", "MSFT", "51.9500", "155", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.7700", "100", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9200", "100", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "200", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "300", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "600", "NASDAQ"], + ["20160525 13:30:00.048", "GOOG", "720.9300", "44", "NASDAQ"], + ["20160525 13:30:00.074", "AAPL", "98.6700", "478343", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6700", "478343", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6600", "6", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "30", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "75", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "20", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "35", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.6500", "10", "NASDAQ"], + ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"], + ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "1000", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "300", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "400", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "600", "ARCA"], + ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "783", "NASDAQ"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"], + ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"], + ["20160525 13:30:00.084", "AAPL", "98.6400", "40", "NASDAQ"], + ["20160525 13:30:00.084", "AAPL", "98.5500", "149", "EDGX"], + ["20160525 13:30:00.086", "AAPL", "98.5600", "500", "ARCA"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "647", "EDGX"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "300", "EDGX"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "50", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "50", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "70", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "70", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "1", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "62", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "10", "NASDAQ"], + ["20160525 13:30:00.104", "AAPL", "98.6300", "100", "ARCA"], + ["20160525 13:30:00.105", "AAPL", "98.6300", "100", "ARCA"], + ["20160525 13:30:00.105", "AAPL", "98.6300", "700", "ARCA"], + ["20160525 13:30:00.106", "AAPL", "98.6300", "61", "EDGX"], + ["20160525 13:30:00.107", "AAPL", "98.6300", "100", "ARCA"], + ["20160525 13:30:00.107", "AAPL", "98.6300", "53", "ARCA"], + ["20160525 13:30:00.108", "AAPL", "98.6300", "100", "ARCA"], + ["20160525 13:30:00.108", "AAPL", "98.6300", "839", "ARCA"], + ["20160525 13:30:00.115", "AAPL", "98.6300", "5", "EDGX"], + ["20160525 
13:30:00.118", "AAPL", "98.6300", "295", "EDGX"], + ["20160525 13:30:00.118", "AAPL", "98.6300", "5", "EDGX"], + ["20160525 13:30:00.128", "AAPL", "98.6300", "100", "NASDAQ"], + ["20160525 13:30:00.128", "AAPL", "98.6300", "100", "NASDAQ"], + ["20160525 13:30:00.128", "MSFT", "51.9200", "100", "ARCA"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "100", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "10", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "59", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "31", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "69", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "12", "NASDAQ"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "12", "EDGX"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "100", "ARCA"], + ["20160525 13:30:00.129", "AAPL", "98.6200", "100", "ARCA"], + ["20160525 13:30:00.130", "MSFT", "51.9500", "317", "ARCA"], + ["20160525 13:30:00.130", "MSFT", "51.9500", "283", "ARCA"], + ["20160525 13:30:00.135", "MSFT", "51.9300", "100", "EDGX"], + ["20160525 13:30:00.135", "AAPL", "98.6200", "100", "ARCA"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "12", "NASDAQ"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "88", "NASDAQ"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "162", "NASDAQ"], + ["20160525 13:30:00.144", "AAPL", "98.6100", "100", "BATS"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "61", "ARCA"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "25", "ARCA"], + ["20160525 13:30:00.144", "AAPL", "98.6200", "14", "ARCA"], + ["20160525 13:30:00.145", "AAPL", "98.6200", "12", "ARCA"], + ["20160525 13:30:00.145", "AAPL", "98.6200", "100", "ARCA"], + ["20160525 13:30:00.145", "AAPL", "98.6300", "100", "NASDAQ"], + ["20160525 13:30:00.145", "AAPL", "98.6300", "100", "NASDAQ"], + ], + columns="time,ticker,price,quantity,marketCenter".split(","), + ) + trades["price"] = trades["price"].astype("float64") + trades["quantity"] = trades["quantity"].astype("int64") + trades = self.prep_data(trades) + + quotes = pd.DataFrame( + [ + ["20160525 13:30:00.023", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.023", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.041", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"], + ["20160525 13:30:00.072", "GOOG", "720.50", "720.88"], + ["20160525 13:30:00.075", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"], + ["20160525 13:30:00.078", "MSFT", "51.92", "51.95"], + ["20160525 13:30:00.079", "MSFT", "51.92", "51.95"], + ["20160525 13:30:00.080", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.084", "AAPL", "98.55", "98.56"], + ["20160525 13:30:00.086", "AAPL", "98.55", "98.63"], + ["20160525 13:30:00.088", "AAPL", "98.65", "98.63"], + ["20160525 13:30:00.089", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"], + ["20160525 13:30:00.104", "AAPL", 
"98.62", "98.63"], + ["20160525 13:30:00.105", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.107", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.115", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.115", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.118", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.128", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.128", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.62", "98.63"], + ["20160525 13:30:00.129", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.130", "MSFT", "51.93", "51.95"], + ["20160525 13:30:00.130", "MSFT", "51.93", "51.95"], + ["20160525 13:30:00.130", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.131", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.131", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.135", "MSFT", "51.92", "51.95"], + ["20160525 13:30:00.135", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.136", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.136", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.144", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.144", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.145", "AAPL", "98.61", "98.62"], + ["20160525 13:30:00.145", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.145", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.145", "AAPL", "98.60", "98.63"], + ["20160525 13:30:00.145", "AAPL", "98.61", "98.63"], + ["20160525 13:30:00.145", "AAPL", "98.60", "98.63"], + ], + columns="time,ticker,bid,ask".split(","), + ) + quotes["bid"] = quotes["bid"].astype("float64") + quotes["ask"] = quotes["ask"].astype("float64") + quotes = self.prep_data(quotes, dedupe=True) + + result = merge_asof(trades, quotes, on="time", by="ticker") + tm.assert_frame_equal(result, expected) + + def test_basic_no_by(self, trades, asof, quotes): + f = ( + lambda x: x[x.ticker == "MSFT"] + .drop("ticker", axis=1) + .reset_index(drop=True) + ) + + # just use a single ticker + expected = f(asof) + trades = f(trades) + quotes = f(quotes) + + result = merge_asof(trades, quotes, on="time") + tm.assert_frame_equal(result, expected) + + def test_valid_join_keys(self, trades, quotes): + msg = r"incompatible merge keys \[1\] .* must be the same type" + + with pytest.raises(MergeError, match=msg): + merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker") + + with pytest.raises(MergeError, match="can only asof on a key for left"): + merge_asof(trades, quotes, on=["time", "ticker"], by="ticker") + + with pytest.raises(MergeError, match="can only asof on a key for left"): + merge_asof(trades, quotes, by="ticker") + + def test_with_duplicates(self, datapath, trades, quotes, asof): + q = ( + pd.concat([quotes, quotes]) + .sort_values(["time", "ticker"]) + .reset_index(drop=True) + ) + result = merge_asof(trades, q, on="time", by="ticker") + expected = self.prep_data(asof) + tm.assert_frame_equal(result, expected) + + def test_with_duplicates_no_on(self): + df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]}) + df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]}) + result = merge_asof(df1, df2, on="key") + expected = pd.DataFrame( + {"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]} + ) + tm.assert_frame_equal(result, expected) + + def test_valid_allow_exact_matches(self, trades, quotes): + msg = "allow_exact_matches must be boolean, passed foo" + + with 
pytest.raises(MergeError, match=msg): + merge_asof( + trades, quotes, on="time", by="ticker", allow_exact_matches="foo" + ) + + def test_valid_tolerance(self, trades, quotes): + # dti + merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s")) + + # integer + merge_asof( + trades.reset_index(), + quotes.reset_index(), + on="index", + by="ticker", + tolerance=1, + ) + + msg = r"incompatible tolerance .*, must be compat with type .*" + + # incompat + with pytest.raises(MergeError, match=msg): + merge_asof(trades, quotes, on="time", by="ticker", tolerance=1) + + # invalid + with pytest.raises(MergeError, match=msg): + merge_asof( + trades.reset_index(), + quotes.reset_index(), + on="index", + by="ticker", + tolerance=1.0, + ) + + msg = "tolerance must be positive" + + # invalid negative + with pytest.raises(MergeError, match=msg): + merge_asof( + trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s") + ) + + with pytest.raises(MergeError, match=msg): + merge_asof( + trades.reset_index(), + quotes.reset_index(), + on="index", + by="ticker", + tolerance=-1, + ) + + def test_non_sorted(self, trades, quotes): + trades = trades.sort_values("time", ascending=False) + quotes = quotes.sort_values("time", ascending=False) + + # we require that we are already sorted on time & quotes + assert not trades.time.is_monotonic_increasing + assert not quotes.time.is_monotonic_increasing + with pytest.raises(ValueError, match="left keys must be sorted"): + merge_asof(trades, quotes, on="time", by="ticker") + + trades = trades.sort_values("time") + assert trades.time.is_monotonic_increasing + assert not quotes.time.is_monotonic_increasing + with pytest.raises(ValueError, match="right keys must be sorted"): + merge_asof(trades, quotes, on="time", by="ticker") + + quotes = quotes.sort_values("time") + assert trades.time.is_monotonic_increasing + assert quotes.time.is_monotonic_increasing + + # ok, though has dupes + merge_asof(trades, quotes, on="time", by="ticker") + + @pytest.mark.parametrize( + "tolerance_ts", + [Timedelta("1day"), datetime.timedelta(days=1)], + ids=["Timedelta", "datetime.timedelta"], + ) + def test_tolerance(self, tolerance_ts, trades, quotes, tolerance): + result = merge_asof( + trades, quotes, on="time", by="ticker", tolerance=tolerance_ts + ) + expected = tolerance + tm.assert_frame_equal(result, expected) + + def test_tolerance_forward(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]} + ) + + result = merge_asof(left, right, on="a", direction="forward", tolerance=1) + tm.assert_frame_equal(result, expected) + + def test_tolerance_nearest(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]} + ) + + result = merge_asof(left, right, on="a", direction="nearest", tolerance=1) + tm.assert_frame_equal(result, expected) + + def test_tolerance_tz(self, unit): + # GH 14844 + left = pd.DataFrame( + { + "date": pd.date_range( + start=to_datetime("2016-01-02"), + freq="D", + periods=5, + tz=pytz.timezone("UTC"), + unit=unit, + ), + "value1": np.arange(5), + } + ) + right = pd.DataFrame( + { + "date": pd.date_range( + 
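+ # right runs 2016-01-01 through 2016-01-05, one day behind left, so the 1-day tolerance still matches every left row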
start=to_datetime("2016-01-01"), + freq="D", + periods=5, + tz=pytz.timezone("UTC"), + unit=unit, + ), + "value2": list("ABCDE"), + } + ) + result = merge_asof(left, right, on="date", tolerance=Timedelta("1 day")) + + expected = pd.DataFrame( + { + "date": pd.date_range( + start=to_datetime("2016-01-02"), + freq="D", + periods=5, + tz=pytz.timezone("UTC"), + unit=unit, + ), + "value1": np.arange(5), + "value2": list("BCDEE"), + } + ) + tm.assert_frame_equal(result, expected) + + def test_tolerance_float(self): + # GH22981 + left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame( + {"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]} + ) + + expected = pd.DataFrame( + { + "a": [1.1, 3.5, 10.9], + "left_val": ["a", "b", "c"], + "right_val": [1, 3.3, np.nan], + } + ) + + result = merge_asof(left, right, on="a", direction="nearest", tolerance=0.5) + tm.assert_frame_equal(result, expected) + + def test_index_tolerance(self, trades, quotes, tolerance): + # GH 15135 + expected = tolerance.set_index("time") + trades = trades.set_index("time") + quotes = quotes.set_index("time") + + result = merge_asof( + trades, + quotes, + left_index=True, + right_index=True, + by="ticker", + tolerance=Timedelta("1day"), + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches(self, trades, quotes, allow_exact_matches): + result = merge_asof( + trades, quotes, on="time", by="ticker", allow_exact_matches=False + ) + expected = allow_exact_matches + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_forward(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]} + ) + + result = merge_asof( + left, right, on="a", direction="forward", allow_exact_matches=False + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_nearest(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]} + ) + + result = merge_asof( + left, right, on="a", direction="nearest", allow_exact_matches=False + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance( + self, trades, quotes, allow_exact_matches_and_tolerance + ): + result = merge_asof( + trades, + quotes, + on="time", + by="ticker", + tolerance=Timedelta("100ms"), + allow_exact_matches=False, + ) + expected = allow_exact_matches_and_tolerance + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance2(self): + # GH 13695 + df1 = pd.DataFrame( + {"time": to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]} + ) + df2 = pd.DataFrame( + { + "time": to_datetime( + ["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"] + ), + "version": [1, 2], + } + ) + + result = merge_asof(df1, df2, on="time") + expected = pd.DataFrame( + { + "time": to_datetime(["2016-07-15 13:30:00.030"]), + "username": ["bob"], + "version": [2], + } + ) + tm.assert_frame_equal(result, expected) + + result = merge_asof(df1, df2, on="time", allow_exact_matches=False) + expected = pd.DataFrame( + { + "time": to_datetime(["2016-07-15 13:30:00.030"]), + "username": ["bob"], + "version": [1], + } 
+ ) + tm.assert_frame_equal(result, expected) + + result = merge_asof( + df1, + df2, + on="time", + allow_exact_matches=False, + tolerance=Timedelta("10ms"), + ) + expected = pd.DataFrame( + { + "time": to_datetime(["2016-07-15 13:30:00.030"]), + "username": ["bob"], + "version": [np.nan], + } + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance3(self): + # GH 13709 + df1 = pd.DataFrame( + { + "time": to_datetime( + ["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"] + ), + "username": ["bob", "charlie"], + } + ) + df2 = pd.DataFrame( + { + "time": to_datetime( + ["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"] + ), + "version": [1, 2], + } + ) + + result = merge_asof( + df1, + df2, + on="time", + allow_exact_matches=False, + tolerance=Timedelta("10ms"), + ) + expected = pd.DataFrame( + { + "time": to_datetime( + ["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"] + ), + "username": ["bob", "charlie"], + "version": [np.nan, np.nan], + } + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance_forward(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]} + ) + + result = merge_asof( + left, + right, + on="a", + direction="forward", + allow_exact_matches=False, + tolerance=1, + ) + tm.assert_frame_equal(result, expected) + + def test_allow_exact_matches_and_tolerance_nearest(self): + # GH14887 + + left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]}) + + expected = pd.DataFrame( + {"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]} + ) + + result = merge_asof( + left, + right, + on="a", + direction="nearest", + allow_exact_matches=False, + tolerance=1, + ) + tm.assert_frame_equal(result, expected) + + def test_forward_by(self): + # GH14887 + + left = pd.DataFrame( + { + "a": [1, 5, 10, 12, 15], + "b": ["X", "X", "Y", "Z", "Y"], + "left_val": ["a", "b", "c", "d", "e"], + } + ) + right = pd.DataFrame( + { + "a": [1, 6, 11, 15, 16], + "b": ["X", "Z", "Y", "Z", "Y"], + "right_val": [1, 6, 11, 15, 16], + } + ) + + expected = pd.DataFrame( + { + "a": [1, 5, 10, 12, 15], + "b": ["X", "X", "Y", "Z", "Y"], + "left_val": ["a", "b", "c", "d", "e"], + "right_val": [1, np.nan, 11, 15, 16], + } + ) + + result = merge_asof(left, right, on="a", by="b", direction="forward") + tm.assert_frame_equal(result, expected) + + def test_nearest_by(self): + # GH14887 + + left = pd.DataFrame( + { + "a": [1, 5, 10, 12, 15], + "b": ["X", "X", "Z", "Z", "Y"], + "left_val": ["a", "b", "c", "d", "e"], + } + ) + right = pd.DataFrame( + { + "a": [1, 6, 11, 15, 16], + "b": ["X", "Z", "Z", "Z", "Y"], + "right_val": [1, 6, 11, 15, 16], + } + ) + + expected = pd.DataFrame( + { + "a": [1, 5, 10, 12, 15], + "b": ["X", "X", "Z", "Z", "Y"], + "left_val": ["a", "b", "c", "d", "e"], + "right_val": [1, 1, 11, 11, 16], + } + ) + + result = merge_asof(left, right, on="a", by="b", direction="nearest") + tm.assert_frame_equal(result, expected) + + def test_by_int(self): + # we specialize by type, so test that this is correct + df1 = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.020", + "20160525 13:30:00.030", + "20160525 13:30:00.040", + "20160525 13:30:00.050", + "20160525 13:30:00.060", + ] + ), 
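allow_exact_matches=False, exercised at length above (GH 13695, GH 13709), removes ties on the key itself from the candidate set, so the match falls back to the strictly earlier row; combining it with a tight tolerance can leave no match at all. A sketch of that interaction on hypothetical data:

import pandas as pd

df1 = pd.DataFrame({"t": [10], "user": ["bob"]})
df2 = pd.DataFrame({"t": [0, 10], "version": [1, 2]})

pd.merge_asof(df1, df2, on="t")                             # version 2 (tie allowed)
pd.merge_asof(df1, df2, on="t", allow_exact_matches=False)  # version 1 (strictly earlier)
pd.merge_asof(df1, df2, on="t", allow_exact_matches=False, tolerance=5)
# no match at all: the only strictly-earlier key (0) is 10 away, beyond the tolerance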
+ "key": [1, 2, 1, 3, 2], + "value1": [1.1, 1.2, 1.3, 1.4, 1.5], + }, + columns=["time", "key", "value1"], + ) + + df2 = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.015", + "20160525 13:30:00.020", + "20160525 13:30:00.025", + "20160525 13:30:00.035", + "20160525 13:30:00.040", + "20160525 13:30:00.055", + "20160525 13:30:00.060", + "20160525 13:30:00.065", + ] + ), + "key": [2, 1, 1, 3, 2, 1, 2, 3], + "value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8], + }, + columns=["time", "key", "value2"], + ) + + result = merge_asof(df1, df2, on="time", by="key") + + expected = pd.DataFrame( + { + "time": to_datetime( + [ + "20160525 13:30:00.020", + "20160525 13:30:00.030", + "20160525 13:30:00.040", + "20160525 13:30:00.050", + "20160525 13:30:00.060", + ] + ), + "key": [1, 2, 1, 3, 2], + "value1": [1.1, 1.2, 1.3, 1.4, 1.5], + "value2": [2.2, 2.1, 2.3, 2.4, 2.7], + }, + columns=["time", "key", "value1", "value2"], + ) + + tm.assert_frame_equal(result, expected) + + def test_on_float(self): + # mimics how to determine the minimum-price variation + df1 = pd.DataFrame( + { + "price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078], + "symbol": list("ABCDEFG"), + }, + columns=["symbol", "price"], + ) + + df2 = pd.DataFrame( + {"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]}, + columns=["price", "mpv"], + ) + + df1 = df1.sort_values("price").reset_index(drop=True) + + result = merge_asof(df1, df2, on="price") + + expected = pd.DataFrame( + { + "symbol": list("BGACEDF"), + "price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90], + "mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05], + }, + columns=["symbol", "price", "mpv"], + ) + + tm.assert_frame_equal(result, expected) + + def test_on_specialized_type(self, any_real_numpy_dtype): + # see gh-13936 + dtype = np.dtype(any_real_numpy_dtype).type + + df1 = pd.DataFrame( + {"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")}, + columns=["symbol", "value"], + ) + df1.value = dtype(df1.value) + + df2 = pd.DataFrame( + {"value": [0, 80, 120, 125], "result": list("xyzw")}, + columns=["value", "result"], + ) + df2.value = dtype(df2.value) + + df1 = df1.sort_values("value").reset_index(drop=True) + result = merge_asof(df1, df2, on="value") + + expected = pd.DataFrame( + { + "symbol": list("BACEGDF"), + "value": [2, 5, 25, 78, 79, 100, 120], + "result": list("xxxxxyz"), + }, + columns=["symbol", "value", "result"], + ) + expected.value = dtype(expected.value) + + tm.assert_frame_equal(result, expected) + + def test_on_specialized_type_by_int(self, any_real_numpy_dtype): + # see gh-13936 + dtype = np.dtype(any_real_numpy_dtype).type + + df1 = pd.DataFrame( + { + "value": [5, 2, 25, 100, 78, 120, 79], + "key": [1, 2, 3, 2, 3, 1, 2], + "symbol": list("ABCDEFG"), + }, + columns=["symbol", "key", "value"], + ) + df1.value = dtype(df1.value) + + df2 = pd.DataFrame( + {"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")}, + columns=["value", "key", "result"], + ) + df2.value = dtype(df2.value) + + df1 = df1.sort_values("value").reset_index(drop=True) + result = merge_asof(df1, df2, on="value", by="key") + + expected = pd.DataFrame( + { + "symbol": list("BACEGDF"), + "key": [2, 1, 3, 3, 2, 2, 1], + "value": [2, 5, 25, 78, 79, 100, 120], + "result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"], + }, + columns=["symbol", "key", "value", "result"], + ) + expected.value = dtype(expected.value) + + tm.assert_frame_equal(result, expected) + + def test_on_float_by_int(self): + # type specialize 
both "by" and "on" parameters + df1 = pd.DataFrame( + { + "symbol": list("AAABBBCCC"), + "exch": [1, 2, 3, 1, 2, 3, 1, 2, 3], + "price": [ + 3.26, + 3.2599, + 3.2598, + 12.58, + 12.59, + 12.5, + 378.15, + 378.2, + 378.25, + ], + }, + columns=["symbol", "exch", "price"], + ) + + df2 = pd.DataFrame( + { + "exch": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0], + "mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0], + }, + columns=["exch", "price", "mpv"], + ) + + df1 = df1.sort_values("price").reset_index(drop=True) + df2 = df2.sort_values("price").reset_index(drop=True) + + result = merge_asof(df1, df2, on="price", by="exch") + + expected = pd.DataFrame( + { + "symbol": list("AAABBBCCC"), + "exch": [3, 2, 1, 3, 1, 2, 1, 2, 3], + "price": [ + 3.2598, + 3.2599, + 3.26, + 12.5, + 12.58, + 12.59, + 378.15, + 378.2, + 378.25, + ], + "mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25], + }, + columns=["symbol", "exch", "price", "mpv"], + ) + + tm.assert_frame_equal(result, expected) + + def test_merge_datatype_error_raises(self, using_infer_string): + if using_infer_string: + msg = "incompatible merge keys" + else: + msg = r"Incompatible merge dtype, .*, both sides must have numeric dtype" + + left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]}) + right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]}) + + with pytest.raises(MergeError, match=msg): + merge_asof(left, right, on="a") + + def test_merge_datatype_categorical_error_raises(self): + msg = ( + r"incompatible merge keys \[0\] .* both sides category, " + "but not equal ones" + ) + + left = pd.DataFrame( + {"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])} + ) + right = pd.DataFrame( + { + "right_val": [1, 2, 3, 6, 7], + "a": pd.Categorical(["a", "X", "c", "X", "b"]), + } + ) + + with pytest.raises(MergeError, match=msg): + merge_asof(left, right, on="a") + + def test_merge_groupby_multiple_column_with_categorical_column(self): + # GH 16454 + df = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])}) + result = merge_asof(df, df, on="x", by=["y", "z"]) + expected = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "func", [lambda x: x, lambda x: to_datetime(x)], ids=["numeric", "datetime"] + ) + @pytest.mark.parametrize("side", ["left", "right"]) + def test_merge_on_nans(self, func, side): + # GH 23189 + msg = f"Merge keys contain null values on {side} side" + nulls = func([1.0, 5.0, np.nan]) + non_nulls = func([1.0, 5.0, 10.0]) + df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]}) + df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]}) + + with pytest.raises(ValueError, match=msg): + if side == "left": + merge_asof(df_null, df, on="a") + else: + merge_asof(df, df_null, on="a") + + def test_by_nullable(self, any_numeric_ea_dtype, using_infer_string): + # Note: this test passes if instead of using pd.array we use + # np.array([np.nan, 1]). Other than that, I (@jbrockmendel) + # have NO IDEA what the expected behavior is. + # TODO(GH#32306): may be relevant to the expected behavior here. + + arr = pd.array([pd.NA, 0, 1], dtype=any_numeric_ea_dtype) + if arr.dtype.kind in ["i", "u"]: + max_val = np.iinfo(arr.dtype.numpy_dtype).max + else: + max_val = np.finfo(arr.dtype.numpy_dtype).max + # set value s.t. 
(at least for integer dtypes) arr._values_for_argsort + # is not an injection + arr[2] = max_val + + left = pd.DataFrame( + { + "by_col1": arr, + "by_col2": ["HELLO", "To", "You"], + "on_col": [2, 4, 6], + "value": ["a", "c", "e"], + } + ) + right = pd.DataFrame( + { + "by_col1": arr, + "by_col2": ["WORLD", "Wide", "Web"], + "on_col": [1, 2, 6], + "value": ["b", "d", "f"], + } + ) + + result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col") + expected = pd.DataFrame( + { + "by_col1": arr, + "by_col2": ["HELLO", "To", "You"], + "on_col": [2, 4, 6], + "value_x": ["a", "c", "e"], + } + ) + expected["value_y"] = np.array([np.nan, np.nan, np.nan], dtype=object) + if using_infer_string: + expected["value_y"] = expected["value_y"].astype("string[pyarrow_numpy]") + tm.assert_frame_equal(result, expected) + + def test_merge_by_col_tz_aware(self): + # GH 21184 + left = pd.DataFrame( + { + "by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "on_col": [2], + "values": ["a"], + } + ) + right = pd.DataFrame( + { + "by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "on_col": [1], + "values": ["b"], + } + ) + result = merge_asof(left, right, by="by_col", on="on_col") + expected = pd.DataFrame( + [[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]], + columns=["by_col", "on_col", "values_x", "values_y"], + ) + tm.assert_frame_equal(result, expected) + + def test_by_mixed_tz_aware(self, using_infer_string): + # GH 26649 + left = pd.DataFrame( + { + "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "by_col2": ["HELLO"], + "on_col": [2], + "value": ["a"], + } + ) + right = pd.DataFrame( + { + "by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"), + "by_col2": ["WORLD"], + "on_col": [1], + "value": ["b"], + } + ) + result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col") + expected = pd.DataFrame( + [[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]], + columns=["by_col1", "by_col2", "on_col", "value_x"], + ) + expected["value_y"] = np.array([np.nan], dtype=object) + if using_infer_string: + expected["value_y"] = expected["value_y"].astype("string[pyarrow_numpy]") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["float64", "int16", "m8[ns]", "M8[us]"]) + def test_by_dtype(self, dtype): + # GH 55453, GH 22794 + left = pd.DataFrame( + { + "by_col": np.array([1], dtype=dtype), + "on_col": [2], + "value": ["a"], + } + ) + right = pd.DataFrame( + { + "by_col": np.array([1], dtype=dtype), + "on_col": [1], + "value": ["b"], + } + ) + result = merge_asof(left, right, by="by_col", on="on_col") + expected = pd.DataFrame( + { + "by_col": np.array([1], dtype=dtype), + "on_col": [2], + "value_x": ["a"], + "value_y": ["b"], + } + ) + tm.assert_frame_equal(result, expected) + + def test_timedelta_tolerance_nearest(self, unit): + # GH 27642 + if unit == "s": + pytest.skip( + "This test is invalid with unit='s' because that would " + "round left['time']" + ) + + left = pd.DataFrame( + list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])), + columns=["time", "left"], + ) + + left["time"] = pd.to_timedelta(left["time"], "ms").astype(f"m8[{unit}]") + + right = pd.DataFrame( + list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])), + columns=["time", "right"], + ) + + right["time"] = pd.to_timedelta(right["time"], "ms").astype(f"m8[{unit}]") + + expected = pd.DataFrame( + list( + zip( + [0, 5, 10, 15, 20, 25], + [0, 1, 2, 3, 4, 5], + [0, np.nan, 2, 4, np.nan, np.nan], + ) + ), + columns=["time", "left", 
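The `by` variations above (nullable, tz-aware, categorical, and fixed-width dtypes) all exercise the same contract: rows are first partitioned on equal `by` keys and the asof search runs within each partition. A minimal sketch with hypothetical trades and quotes:

import pandas as pd

trades = pd.DataFrame(
    {
        "time": pd.to_datetime(["2016-05-25 13:30:00.023", "2016-05-25 13:30:00.038"]),
        "ticker": ["MSFT", "AAPL"],
        "qty": [100, 200],
    }
)
quotes = pd.DataFrame(
    {
        "time": pd.to_datetime(["2016-05-25 13:30:00.023", "2016-05-25 13:30:00.030"]),
        "ticker": ["AAPL", "MSFT"],
        "bid": [98.61, 51.97],
    }
)

# each trade takes the last quote at or before its time *for the same ticker*
out = pd.merge_asof(trades, quotes, on="time", by="ticker")
# MSFT trade at .023 finds no earlier MSFT quote -> bid is NaN;
# AAPL trade at .038 picks the .023 AAPL quote -> bid 98.61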
"right"], + ) + + expected["time"] = pd.to_timedelta(expected["time"], "ms").astype(f"m8[{unit}]") + + result = merge_asof( + left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest" + ) + + tm.assert_frame_equal(result, expected) + + def test_int_type_tolerance(self, any_int_dtype): + # GH #28870 + + left = pd.DataFrame({"a": [0, 10, 20], "left_val": [1, 2, 3]}) + right = pd.DataFrame({"a": [5, 15, 25], "right_val": [1, 2, 3]}) + left["a"] = left["a"].astype(any_int_dtype) + right["a"] = right["a"].astype(any_int_dtype) + + expected = pd.DataFrame( + {"a": [0, 10, 20], "left_val": [1, 2, 3], "right_val": [np.nan, 1.0, 2.0]} + ) + expected["a"] = expected["a"].astype(any_int_dtype) + + result = merge_asof(left, right, on="a", tolerance=10) + tm.assert_frame_equal(result, expected) + + def test_merge_index_column_tz(self): + # GH 29864 + index = pd.date_range("2019-10-01", freq="30min", periods=5, tz="UTC") + left = pd.DataFrame([0.9, 0.8, 0.7, 0.6], columns=["xyz"], index=index[1:]) + right = pd.DataFrame({"from_date": index, "abc": [2.46] * 4 + [2.19]}) + result = merge_asof( + left=left, right=right, left_index=True, right_on=["from_date"] + ) + expected = pd.DataFrame( + { + "xyz": [0.9, 0.8, 0.7, 0.6], + "from_date": index[1:], + "abc": [2.46] * 3 + [2.19], + }, + index=pd.date_range( + "2019-10-01 00:30:00", freq="30min", periods=4, tz="UTC" + ), + ) + tm.assert_frame_equal(result, expected) + + result = merge_asof( + left=right, right=left, right_index=True, left_on=["from_date"] + ) + expected = pd.DataFrame( + { + "from_date": index, + "abc": [2.46] * 4 + [2.19], + "xyz": [np.nan, 0.9, 0.8, 0.7, 0.6], + }, + index=Index([0, 1, 2, 3, 4]), + ) + tm.assert_frame_equal(result, expected) + + def test_left_index_right_index_tolerance(self, unit): + # https://github.com/pandas-dev/pandas/issues/35558 + if unit == "s": + pytest.skip( + "This test is invalid with unit='s' because that would round dr1" + ) + + dr1 = pd.date_range( + start="1/1/2020", end="1/20/2020", freq="2D", unit=unit + ) + Timedelta(seconds=0.4).as_unit(unit) + dr2 = pd.date_range(start="1/1/2020", end="2/1/2020", unit=unit) + + df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1)) + df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2)) + + expected = pd.DataFrame( + {"val1": "foo", "val2": "bar"}, index=pd.DatetimeIndex(dr1) + ) + result = merge_asof( + df1, + df2, + left_index=True, + right_index=True, + tolerance=Timedelta(seconds=0.5), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] +) +@pytest.mark.parametrize( + "kwargs", [{"on": "x"}, {"left_index": True, "right_index": True}] +) +@pytest.mark.parametrize( + "data", + [["2019-06-01 00:09:12", "2019-06-01 00:10:29"], [1.0, "2019-06-01 00:10:29"]], +) +def test_merge_asof_non_numerical_dtype(kwargs, data, infer_string): + # GH#29130 + with option_context("future.infer_string", infer_string): + left = pd.DataFrame({"x": data}, index=data) + right = pd.DataFrame({"x": data}, index=data) + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, .*, both sides must have numeric dtype", + ): + merge_asof(left, right, **kwargs) + + +def test_merge_asof_non_numerical_dtype_object(): + # GH#29130 + left = pd.DataFrame({"a": ["12", "13", "15"], "left_val1": ["a", "b", "c"]}) + right = pd.DataFrame({"a": ["a", "b", "c"], "left_val": ["d", "e", "f"]}) + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, 
.*, both sides must have numeric dtype", + ): + merge_asof( + left, + right, + left_on="left_val1", + right_on="a", + left_by="a", + right_by="left_val", + ) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"right_index": True, "left_index": True}, + {"left_on": "left_time", "right_index": True}, + {"left_index": True, "right_on": "right"}, + ], +) +def test_merge_asof_index_behavior(kwargs): + # GH 33463 + index = Index([1, 5, 10], name="test") + left = pd.DataFrame({"left": ["a", "b", "c"], "left_time": [1, 4, 10]}, index=index) + right = pd.DataFrame({"right": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) + result = merge_asof(left, right, **kwargs) + + expected = pd.DataFrame( + {"left": ["a", "b", "c"], "left_time": [1, 4, 10], "right": [1, 3, 7]}, + index=index, + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_numeric_column_in_index(): + # GH#34488 + left = pd.DataFrame({"b": [10, 11, 12]}, index=Index([1, 2, 3], name="a")) + right = pd.DataFrame({"c": [20, 21, 22]}, index=Index([0, 2, 3], name="a")) + + result = merge_asof(left, right, left_on="a", right_on="a") + expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]}) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_numeric_column_in_multiindex(): + # GH#34488 + left = pd.DataFrame( + {"b": [10, 11, 12]}, + index=pd.MultiIndex.from_arrays([[1, 2, 3], ["a", "b", "c"]], names=["a", "z"]), + ) + right = pd.DataFrame( + {"c": [20, 21, 22]}, + index=pd.MultiIndex.from_arrays([[1, 2, 3], ["x", "y", "z"]], names=["a", "y"]), + ) + + result = merge_asof(left, right, left_on="a", right_on="a") + expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]}) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_numeric_column_in_index_object_dtype(): + # GH#34488 + left = pd.DataFrame({"b": [10, 11, 12]}, index=Index(["1", "2", "3"], name="a")) + right = pd.DataFrame({"c": [20, 21, 22]}, index=Index(["m", "n", "o"], name="a")) + + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, .*, both sides must have numeric dtype", + ): + merge_asof(left, right, left_on="a", right_on="a") + + left = left.reset_index().set_index(["a", "b"]) + right = right.reset_index().set_index(["a", "c"]) + + with pytest.raises( + MergeError, + match=r"Incompatible merge dtype, .*, both sides must have numeric dtype", + ): + merge_asof(left, right, left_on="a", right_on="a") + + +def test_merge_asof_array_as_on(unit): + # GH#42844 + dti = pd.DatetimeIndex( + ["2021/01/01 00:37", "2021/01/01 01:40"], dtype=f"M8[{unit}]" + ) + right = pd.DataFrame( + { + "a": [2, 6], + "ts": dti, + } + ) + ts_merge = pd.date_range( + start=pd.Timestamp("2021/01/01 00:00"), periods=3, freq="1h", unit=unit + ) + left = pd.DataFrame({"b": [4, 8, 7]}) + result = merge_asof( + left, + right, + left_on=ts_merge, + right_on="ts", + allow_exact_matches=False, + direction="backward", + ) + expected = pd.DataFrame({"b": [4, 8, 7], "a": [np.nan, 2, 6], "ts": ts_merge}) + tm.assert_frame_equal(result, expected) + + result = merge_asof( + right, + left, + left_on="ts", + right_on=ts_merge, + allow_exact_matches=False, + direction="backward", + ) + expected = pd.DataFrame( + { + "a": [2, 6], + "ts": dti, + "b": [4, 8], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_raise_for_duplicate_columns(): + # GH#50102 + left = pd.DataFrame([[1, 2, "a"]], columns=["a", "a", "left_val"]) + right = pd.DataFrame([[1, 1, 1]], columns=["a", "a", "right_val"]) + + with
pytest.raises(ValueError, match="column label 'a'"): + merge_asof(left, right, on="a") + + with pytest.raises(ValueError, match="column label 'a'"): + merge_asof(left, right, left_on="a", right_on="right_val") + + with pytest.raises(ValueError, match="column label 'a'"): + merge_asof(left, right, left_on="left_val", right_on="a") + + +@pytest.mark.parametrize( + "dtype", + [ + "Int64", + pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow")), + pytest.param("timestamp[s][pyarrow]", marks=td.skip_if_no("pyarrow")), + ], +) +def test_merge_asof_extension_dtype(dtype): + # GH 52904 + left = pd.DataFrame( + { + "join_col": [1, 3, 5], + "left_val": [1, 2, 3], + } + ) + right = pd.DataFrame( + { + "join_col": [2, 3, 4], + "right_val": [1, 2, 3], + } + ) + left = left.astype({"join_col": dtype}) + right = right.astype({"join_col": dtype}) + result = merge_asof(left, right, on="join_col") + expected = pd.DataFrame( + { + "join_col": [1, 3, 5], + "left_val": [1, 2, 3], + "right_val": [np.nan, 2.0, 3.0], + } + ) + expected = expected.astype({"join_col": dtype}) + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no("pyarrow") +def test_merge_asof_pyarrow_td_tolerance(): + # GH 56486 + ser = pd.Series( + [datetime.datetime(2023, 1, 1)], dtype="timestamp[us, UTC][pyarrow]" + ) + df = pd.DataFrame( + { + "timestamp": ser, + "value": [1], + } + ) + result = merge_asof(df, df, on="timestamp", tolerance=Timedelta("1s")) + expected = pd.DataFrame( + { + "timestamp": ser, + "value_x": [1], + "value_y": [1], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_read_only_ndarray(): + # GH 53513 + left = pd.Series([2], index=[2], name="left") + right = pd.Series([1], index=[1], name="right") + # set to read-only + left.index.values.flags.writeable = False + right.index.values.flags.writeable = False + result = merge_asof(left, right, left_index=True, right_index=True) + expected = pd.DataFrame({"left": [2], "right": [1]}, index=[2]) + tm.assert_frame_equal(result, expected) + + +def test_merge_asof_multiby_with_categorical(): + # GH 43541 + left = pd.DataFrame( + { + "c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]), + "c2": ["x"] * 4, + "t": [1] * 4, + "v": range(4), + } + ) + right = pd.DataFrame( + { + "c1": pd.Categorical(["b", "b"], categories=["b", "a"]), + "c2": ["x"] * 2, + "t": [1, 2], + "v": range(2), + } + ) + result = merge_asof( + left, + right, + by=["c1", "c2"], + on="t", + direction="forward", + suffixes=["_left", "_right"], + ) + expected = pd.DataFrame( + { + "c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]), + "c2": ["x"] * 4, + "t": [1] * 4, + "v_left": range(4), + "v_right": [np.nan, np.nan, 0.0, 0.0], + } + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_cross.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_cross.py new file mode 100644 index 0000000000000000000000000000000000000000..14f9036e43fce13580916dacfd6409e4639d025e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_cross.py @@ -0,0 +1,111 @@ +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.core.reshape.merge import ( + MergeError, + merge, +) + + +@pytest.mark.parametrize( + ("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])] +) +def test_merge_cross(input_col, output_cols): + # GH#5401 + left = DataFrame({"a": [1, 3]}) + right = 
DataFrame({input_col: [3, 4]}) + left_copy = left.copy() + right_copy = right.copy() + result = merge(left, right, how="cross") + expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]}) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(left, left_copy) + tm.assert_frame_equal(right, right_copy) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"left_index": True}, + {"right_index": True}, + {"on": "a"}, + {"left_on": "a"}, + {"right_on": "b"}, + ], +) +def test_merge_cross_error_reporting(kwargs): + # GH#5401 + left = DataFrame({"a": [1, 3]}) + right = DataFrame({"b": [3, 4]}) + msg = ( + "Can not pass on, right_on, left_on or set right_index=True or " + "left_index=True" + ) + with pytest.raises(MergeError, match=msg): + merge(left, right, how="cross", **kwargs) + + +def test_merge_cross_mixed_dtypes(): + # GH#5401 + left = DataFrame(["a", "b", "c"], columns=["A"]) + right = DataFrame(range(2), columns=["B"]) + result = merge(left, right, how="cross") + expected = DataFrame({"A": ["a", "a", "b", "b", "c", "c"], "B": [0, 1, 0, 1, 0, 1]}) + tm.assert_frame_equal(result, expected) + + +def test_merge_cross_more_than_one_column(): + # GH#5401 + left = DataFrame({"A": list("ab"), "B": [2, 1]}) + right = DataFrame({"C": range(2), "D": range(4, 6)}) + result = merge(left, right, how="cross") + expected = DataFrame( + { + "A": ["a", "a", "b", "b"], + "B": [2, 2, 1, 1], + "C": [0, 1, 0, 1], + "D": [4, 5, 4, 5], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_merge_cross_null_values(nulls_fixture): + # GH#5401 + left = DataFrame({"a": [1, nulls_fixture]}) + right = DataFrame({"b": ["a", "b"], "c": [1.0, 2.0]}) + result = merge(left, right, how="cross") + expected = DataFrame( + { + "a": [1, 1, nulls_fixture, nulls_fixture], + "b": ["a", "b", "a", "b"], + "c": [1.0, 2.0, 1.0, 2.0], + } + ) + tm.assert_frame_equal(result, expected) + + +def test_join_cross_error_reporting(): + # GH#5401 + left = DataFrame({"a": [1, 3]}) + right = DataFrame({"a": [3, 4]}) + msg = ( + "Can not pass on, right_on, left_on or set right_index=True or " + "left_index=True" + ) + with pytest.raises(MergeError, match=msg): + left.join(right, how="cross", on="a") + + +def test_merge_cross_series(): + # GH#54055 + ls = Series([1, 2, 3, 4], index=[1, 2, 3, 4], name="left") + rs = Series([3, 4, 5, 6], index=[3, 4, 5, 6], name="right") + res = merge(ls, rs, how="cross") + + expected = merge(ls.to_frame(), rs.to_frame(), how="cross") + tm.assert_frame_equal(res, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_index_as_string.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_index_as_string.py new file mode 100644 index 0000000000000000000000000000000000000000..b4271d4face4fb4e90477196cec7919845f37813 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_index_as_string.py @@ -0,0 +1,186 @@ +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm + + +@pytest.fixture +def df1(): + return DataFrame( + { + "outer": [1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4], + "inner": [1, 2, 3, 1, 2, 3, 4, 1, 2, 1, 2], + "v1": np.linspace(0, 1, 11), + } + ) + + +@pytest.fixture +def df2(): + return DataFrame( + { + "outer": [1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3], + "inner": [1, 2, 2, 3, 3, 4, 2, 3, 1, 1, 2, 3], + "v2": np.linspace(10, 11, 12), + } + ) + + +@pytest.fixture(params=[[], ["outer"], ["outer", "inner"]]) +def left_df(request, df1): + 
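The cross-merge tests above all reduce to the definition: how="cross" pairs every left row with every right row, and any key argument is rejected. A sketch on the same data as the first test:

import pandas as pd

left = pd.DataFrame({"a": [1, 3]})
right = pd.DataFrame({"b": [3, 4]})

out = pd.merge(left, right, how="cross")  # 2 x 2 = 4 rows
#    a  b
# 0  1  3
# 1  1  4
# 2  3  3
# 3  3  4

# passing on/left_on/right_on, or setting left_index/right_index,
# together with how="cross" raises MergeError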
"""Construct left test DataFrame with specified levels + (any of 'outer', 'inner', and 'v1') + """ + levels = request.param + if levels: + df1 = df1.set_index(levels) + + return df1 + + +@pytest.fixture(params=[[], ["outer"], ["outer", "inner"]]) +def right_df(request, df2): + """Construct right test DataFrame with specified levels + (any of 'outer', 'inner', and 'v2') + """ + levels = request.param + + if levels: + df2 = df2.set_index(levels) + + return df2 + + +def compute_expected(df_left, df_right, on=None, left_on=None, right_on=None, how=None): + """ + Compute the expected merge result for the test case. + + This method computes the expected result of merging two DataFrames on + a combination of their columns and index levels. It does so by + explicitly dropping/resetting their named index levels, performing a + merge on their columns, and then finally restoring the appropriate + index in the result. + + Parameters + ---------- + df_left : DataFrame + The left DataFrame (may have zero or more named index levels) + df_right : DataFrame + The right DataFrame (may have zero or more named index levels) + on : list of str + The on parameter to the merge operation + left_on : list of str + The left_on parameter to the merge operation + right_on : list of str + The right_on parameter to the merge operation + how : str + The how parameter to the merge operation + + Returns + ------- + DataFrame + The expected merge result + """ + # Handle on param if specified + if on is not None: + left_on, right_on = on, on + + # Compute input named index levels + left_levels = [n for n in df_left.index.names if n is not None] + right_levels = [n for n in df_right.index.names if n is not None] + + # Compute output named index levels + output_levels = [i for i in left_on if i in right_levels and i in left_levels] + + # Drop index levels that aren't involved in the merge + drop_left = [n for n in left_levels if n not in left_on] + if drop_left: + df_left = df_left.reset_index(drop_left, drop=True) + + drop_right = [n for n in right_levels if n not in right_on] + if drop_right: + df_right = df_right.reset_index(drop_right, drop=True) + + # Convert remaining index levels to columns + reset_left = [n for n in left_levels if n in left_on] + if reset_left: + df_left = df_left.reset_index(level=reset_left) + + reset_right = [n for n in right_levels if n in right_on] + if reset_right: + df_right = df_right.reset_index(level=reset_right) + + # Perform merge + expected = df_left.merge(df_right, left_on=left_on, right_on=right_on, how=how) + + # Restore index levels + if output_levels: + expected = expected.set_index(output_levels) + + return expected + + +@pytest.mark.parametrize( + "on,how", + [ + (["outer"], "inner"), + (["inner"], "left"), + (["outer", "inner"], "right"), + (["inner", "outer"], "outer"), + ], +) +def test_merge_indexes_and_columns_on(left_df, right_df, on, how): + # Construct expected result + expected = compute_expected(left_df, right_df, on=on, how=how) + + # Perform merge + result = left_df.merge(right_df, on=on, how=how) + tm.assert_frame_equal(result, expected, check_like=True) + + +@pytest.mark.parametrize( + "left_on,right_on,how", + [ + (["outer"], ["outer"], "inner"), + (["inner"], ["inner"], "right"), + (["outer", "inner"], ["outer", "inner"], "left"), + (["inner", "outer"], ["inner", "outer"], "outer"), + ], +) +def test_merge_indexes_and_columns_lefton_righton( + left_df, right_df, left_on, right_on, how +): + # Construct expected result + expected = compute_expected( + left_df, 
right_df, left_on=left_on, right_on=right_on, how=how + ) + + # Perform merge + result = left_df.merge(right_df, left_on=left_on, right_on=right_on, how=how) + tm.assert_frame_equal(result, expected, check_like=True) + + +@pytest.mark.parametrize("left_index", ["inner", ["inner", "outer"]]) +def test_join_indexes_and_columns_on(df1, df2, left_index, join_type): + # Construct left_df + left_df = df1.set_index(left_index) + + # Construct right_df + right_df = df2.set_index(["outer", "inner"]) + + # Result + expected = ( + left_df.reset_index() + .join( + right_df, on=["outer", "inner"], how=join_type, lsuffix="_x", rsuffix="_y" + ) + .set_index(left_index) + ) + + # Perform join + result = left_df.join( + right_df, on=["outer", "inner"], how=join_type, lsuffix="_x", rsuffix="_y" + ) + + tm.assert_frame_equal(result, expected, check_like=True) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_ordered.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_ordered.py new file mode 100644 index 0000000000000000000000000000000000000000..0bd3ca3cf2c1bd8ceae38d8a04cd6d48c4c69f68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_merge_ordered.py @@ -0,0 +1,244 @@ +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + merge_ordered, +) +import pandas._testing as tm + + +@pytest.fixture +def left(): + return DataFrame({"key": ["a", "c", "e"], "lvalue": [1, 2.0, 3]}) + + +@pytest.fixture +def right(): + return DataFrame({"key": ["b", "c", "d", "f"], "rvalue": [1, 2, 3.0, 4]}) + + +class TestMergeOrdered: + def test_basic(self, left, right): + result = merge_ordered(left, right, on="key") + expected = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "f"], + "lvalue": [1, np.nan, 2, np.nan, 3, np.nan], + "rvalue": [np.nan, 1, 2, 3, np.nan, 4], + } + ) + + tm.assert_frame_equal(result, expected) + + def test_ffill(self, left, right): + result = merge_ordered(left, right, on="key", fill_method="ffill") + expected = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "f"], + "lvalue": [1.0, 1, 2, 2, 3, 3.0], + "rvalue": [np.nan, 1, 2, 3, 3, 4], + } + ) + tm.assert_frame_equal(result, expected) + + def test_multigroup(self, left, right): + left = pd.concat([left, left], ignore_index=True) + + left["group"] = ["a"] * 3 + ["b"] * 3 + + result = merge_ordered( + left, right, on="key", left_by="group", fill_method="ffill" + ) + expected = DataFrame( + { + "key": ["a", "b", "c", "d", "e", "f"] * 2, + "lvalue": [1.0, 1, 2, 2, 3, 3.0] * 2, + "rvalue": [np.nan, 1, 2, 3, 3, 4] * 2, + } + ) + expected["group"] = ["a"] * 6 + ["b"] * 6 + + tm.assert_frame_equal(result, expected.loc[:, result.columns]) + + result2 = merge_ordered( + right, left, on="key", right_by="group", fill_method="ffill" + ) + tm.assert_frame_equal(result, result2.loc[:, result.columns]) + + result = merge_ordered(left, right, on="key", left_by="group") + assert result["group"].notna().all() + + @pytest.mark.filterwarnings( + "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning" + ) + def test_merge_type(self, left, right): + class NotADataFrame(DataFrame): + @property + def _constructor(self): + return NotADataFrame + + nad = NotADataFrame(left) + result = nad.merge(right, on="key") + + assert isinstance(result, NotADataFrame) + + @pytest.mark.parametrize( + "df_seq, pattern", + [ + ((), "[Nn]o objects"), + ([], "[Nn]o objects"), + ({}, "[Nn]o objects"), + ([None], 
"objects.*None"), + ([None, None], "objects.*None"), + ], + ) + def test_empty_sequence_concat(self, df_seq, pattern): + # GH 9157 + with pytest.raises(ValueError, match=pattern): + pd.concat(df_seq) + + @pytest.mark.parametrize( + "arg", [[DataFrame()], [None, DataFrame()], [DataFrame(), None]] + ) + def test_empty_sequence_concat_ok(self, arg): + pd.concat(arg) + + def test_doc_example(self): + left = DataFrame( + { + "group": list("aaabbb"), + "key": ["a", "c", "e", "a", "c", "e"], + "lvalue": [1, 2, 3] * 2, + } + ) + + right = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) + + result = merge_ordered(left, right, fill_method="ffill", left_by="group") + + expected = DataFrame( + { + "group": list("aaaaabbbbb"), + "key": ["a", "b", "c", "d", "e"] * 2, + "lvalue": [1, 1, 2, 2, 3] * 2, + "rvalue": [np.nan, 1, 2, 3, 3] * 2, + } + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "left, right, on, left_by, right_by, expected", + [ + ( + DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}), + DataFrame({"T": [2], "E": [1]}), + ["T"], + ["G", "H"], + None, + DataFrame( + { + "G": ["g"] * 3, + "H": ["h"] * 3, + "T": [1, 2, 3], + "E": [np.nan, 1.0, np.nan], + } + ), + ), + ( + DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}), + DataFrame({"T": [2], "E": [1]}), + "T", + ["G", "H"], + None, + DataFrame( + { + "G": ["g"] * 3, + "H": ["h"] * 3, + "T": [1, 2, 3], + "E": [np.nan, 1.0, np.nan], + } + ), + ), + ( + DataFrame({"T": [2], "E": [1]}), + DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}), + ["T"], + None, + ["G", "H"], + DataFrame( + { + "T": [1, 2, 3], + "E": [np.nan, 1.0, np.nan], + "G": ["g"] * 3, + "H": ["h"] * 3, + } + ), + ), + ], + ) + def test_list_type_by(self, left, right, on, left_by, right_by, expected): + # GH 35269 + result = merge_ordered( + left=left, + right=right, + on=on, + left_by=left_by, + right_by=right_by, + ) + + tm.assert_frame_equal(result, expected) + + def test_left_by_length_equals_to_right_shape0(self): + # GH 38166 + left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE")) + right = DataFrame([[2, 1]], columns=list("ET")) + result = merge_ordered(left, right, on="E", left_by=["G", "H"]) + expected = DataFrame( + {"G": ["g"] * 3, "H": ["h"] * 3, "E": [1, 2, 3], "T": [np.nan, 1.0, np.nan]} + ) + + tm.assert_frame_equal(result, expected) + + def test_elements_not_in_by_but_in_df(self): + # GH 38167 + left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE")) + right = DataFrame([[2, 1]], columns=list("ET")) + msg = r"\{'h'\} not found in left columns" + with pytest.raises(KeyError, match=msg): + merge_ordered(left, right, on="E", left_by=["G", "h"]) + + @pytest.mark.parametrize("invalid_method", ["linear", "carrot"]) + def test_ffill_validate_fill_method(self, left, right, invalid_method): + # GH 55884 + with pytest.raises( + ValueError, match=re.escape("fill_method must be 'ffill' or None") + ): + merge_ordered(left, right, on="key", fill_method=invalid_method) + + def test_ffill_left_merge(self): + # GH 57010 + df1 = DataFrame( + { + "key": ["a", "c", "e", "a", "c", "e"], + "lvalue": [1, 2, 3, 1, 2, 3], + "group": ["a", "a", "a", "b", "b", "b"], + } + ) + df2 = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) + result = merge_ordered( + df1, df2, fill_method="ffill", left_by="group", how="left" + ) + expected = DataFrame( + { + "key": ["a", "c", "e", "a", "c", "e"], + "lvalue": [1, 2, 3, 1, 2, 3], + "group": ["a", "a", "a", "b", "b", "b"], + "rvalue": [np.nan, 2.0, 2.0, 
np.nan, 2.0, 2.0], + } + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_multi.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_multi.py new file mode 100644 index 0000000000000000000000000000000000000000..402ff049884ba25e557d8c415fa9b50030d13b06 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/merge/test_multi.py @@ -0,0 +1,934 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + RangeIndex, + Series, + Timestamp, + option_context, +) +import pandas._testing as tm +from pandas.core.reshape.concat import concat +from pandas.core.reshape.merge import merge + + +@pytest.fixture +def left(): + """left dataframe (not multi-indexed) for multi-index join tests""" + # a little relevant example with NAs + key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"] + key2 = ["two", "one", "three", "one", "two", "one", "two", "two", "three", "one"] + + data = np.random.default_rng(2).standard_normal(len(key1)) + return DataFrame({"key1": key1, "key2": key2, "data": data}) + + +@pytest.fixture +def right(multiindex_dataframe_random_data): + """right dataframe (multi-indexed) for multi-index join tests""" + df = multiindex_dataframe_random_data + df.index.names = ["key1", "key2"] + + df.columns = ["j_one", "j_two", "j_three"] + return df + + +@pytest.fixture +def left_multi(): + return DataFrame( + { + "Origin": ["A", "A", "B", "B", "C"], + "Destination": ["A", "B", "A", "C", "A"], + "Period": ["AM", "AM", "IP", "AM", "OP"], + "TripPurp": ["hbw", "nhb", "hbo", "nhb", "hbw"], + "Trips": [1987, 3647, 2470, 4296, 4444], + }, + columns=["Origin", "Destination", "Period", "TripPurp", "Trips"], + ).set_index(["Origin", "Destination", "Period", "TripPurp"]) + + +@pytest.fixture +def right_multi(): + return DataFrame( + { + "Origin": ["A", "A", "B", "B", "C", "C", "E"], + "Destination": ["A", "B", "A", "B", "A", "B", "F"], + "Period": ["AM", "AM", "IP", "AM", "OP", "IP", "AM"], + "LinkType": ["a", "b", "c", "b", "a", "b", "a"], + "Distance": [100, 80, 90, 80, 75, 35, 55], + }, + columns=["Origin", "Destination", "Period", "LinkType", "Distance"], + ).set_index(["Origin", "Destination", "Period", "LinkType"]) + + +@pytest.fixture +def on_cols_multi(): + return ["Origin", "Destination", "Period"] + + +class TestMergeMulti: + def test_merge_on_multikey(self, left, right, join_type): + on_cols = ["key1", "key2"] + result = left.join(right, on=on_cols, how=join_type).reset_index(drop=True) + + expected = merge(left, right.reset_index(), on=on_cols, how=join_type) + + tm.assert_frame_equal(result, expected) + + result = left.join(right, on=on_cols, how=join_type, sort=True).reset_index( + drop=True + ) + + expected = merge( + left, right.reset_index(), on=on_cols, how=join_type, sort=True + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + @pytest.mark.parametrize("sort", [True, False]) + def test_left_join_multi_index(self, sort, infer_string): + with option_context("future.infer_string", infer_string): + icols = ["1st", "2nd", "3rd"] + + def bind_cols(df): + iord = lambda a: 0 if a != a else ord(a) + f = lambda ts: ts.map(iord) - ord("a") + return f(df["1st"]) + f(df["3rd"]) * 1e2 + df["2nd"].fillna(0) * 10 + + def run_asserts(left, right, sort): + res = 
left.join(right, on=icols, how="left", sort=sort) + + assert len(left) < len(res) + 1 + assert not res["4th"].isna().any() + assert not res["5th"].isna().any() + + tm.assert_series_equal(res["4th"], -res["5th"], check_names=False) + result = bind_cols(res.iloc[:, :-2]) + tm.assert_series_equal(res["4th"], result, check_names=False) + assert result.name is None + + if sort: + tm.assert_frame_equal(res, res.sort_values(icols, kind="mergesort")) + + out = merge(left, right.reset_index(), on=icols, sort=sort, how="left") + + res.index = RangeIndex(len(res)) + tm.assert_frame_equal(out, res) + + lc = list(map(chr, np.arange(ord("a"), ord("z") + 1))) + left = DataFrame( + np.random.default_rng(2).choice(lc, (50, 2)), columns=["1st", "3rd"] + ) + # Explicit cast to float to avoid implicit cast when setting nan + left.insert( + 1, + "2nd", + np.random.default_rng(2).integers(0, 10, len(left)).astype("float"), + ) + + i = np.random.default_rng(2).permutation(len(left)) + right = left.iloc[i].copy() + + left["4th"] = bind_cols(left) + right["5th"] = -bind_cols(right) + right.set_index(icols, inplace=True) + + run_asserts(left, right, sort) + + # inject some nulls + left.loc[1::4, "1st"] = np.nan + left.loc[2::5, "2nd"] = np.nan + left.loc[3::6, "3rd"] = np.nan + left["4th"] = bind_cols(left) + + i = np.random.default_rng(2).permutation(len(left)) + right = left.iloc[i, :-1] + right["5th"] = -bind_cols(right) + right.set_index(icols, inplace=True) + + run_asserts(left, right, sort) + + @pytest.mark.parametrize("sort", [False, True]) + def test_merge_right_vs_left(self, left, right, sort): + # compare left vs right merge with multikey + on_cols = ["key1", "key2"] + merged_left_right = left.merge( + right, left_on=on_cols, right_index=True, how="left", sort=sort + ) + + merge_right_left = right.merge( + left, right_on=on_cols, left_index=True, how="right", sort=sort + ) + + # Reorder columns + merge_right_left = merge_right_left[merged_left_right.columns] + + tm.assert_frame_equal(merged_left_right, merge_right_left) + + def test_merge_multiple_cols_with_mixed_cols_index(self): + # GH29522 + s = Series( + range(6), + MultiIndex.from_product([["A", "B"], [1, 2, 3]], names=["lev1", "lev2"]), + name="Amount", + ) + df = DataFrame({"lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], "col": 0}) + result = merge(df, s.reset_index(), on=["lev1", "lev2"]) + expected = DataFrame( + { + "lev1": list("AAABBB"), + "lev2": [1, 2, 3, 1, 2, 3], + "col": [0] * 6, + "Amount": range(6), + } + ) + tm.assert_frame_equal(result, expected) + + def test_compress_group_combinations(self): + # ~ 40000000 possible unique groups + key1 = [str(i) for i in range(10000)] + key1 = np.tile(key1, 2) + key2 = key1[::-1] + + df = DataFrame( + { + "key1": key1, + "key2": key2, + "value1": np.random.default_rng(2).standard_normal(20000), + } + ) + + df2 = DataFrame( + { + "key1": key1[::2], + "key2": key2[::2], + "value2": np.random.default_rng(2).standard_normal(10000), + } + ) + + # just to hit the label compression code path + merge(df, df2, how="outer") + + def test_left_join_index_preserve_order(self): + on_cols = ["k1", "k2"] + left = DataFrame( + { + "k1": [0, 1, 2] * 8, + "k2": ["foo", "bar"] * 12, + "v": np.array(np.arange(24), dtype=np.int64), + } + ) + + index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")]) + right = DataFrame({"v2": [5, 7]}, index=index) + + result = left.join(right, on=on_cols) + + expected = left.copy() + expected["v2"] = np.nan + expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5 + 
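The assertions above hinge on one property of DataFrame.join with on=: the listed left columns are looked up in the right frame's MultiIndex, the join is left by default, and the left row order is preserved. A minimal sketch:

import pandas as pd

left = pd.DataFrame({"k1": [0, 1, 2], "k2": ["foo", "bar", "foo"], "v": [10, 20, 30]})
right = pd.DataFrame(
    {"v2": [5, 7]},
    index=pd.MultiIndex.from_tuples([(2, "foo"), (1, "bar")], names=["k1", "k2"]),
)

out = left.join(right, on=["k1", "k2"])
# v2 comes out as [NaN, 7.0, 5.0]: unmatched rows get NaN, row order is unchanged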
expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7 + + tm.assert_frame_equal(result, expected) + + result.sort_values(on_cols, kind="mergesort", inplace=True) + expected = left.join(right, on=on_cols, sort=True) + + tm.assert_frame_equal(result, expected) + + # test join with multi dtypes blocks + left = DataFrame( + { + "k1": [0, 1, 2] * 8, + "k2": ["foo", "bar"] * 12, + "k3": np.array([0, 1, 2] * 8, dtype=np.float32), + "v": np.array(np.arange(24), dtype=np.int32), + } + ) + + index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")]) + right = DataFrame({"v2": [5, 7]}, index=index) + + result = left.join(right, on=on_cols) + + expected = left.copy() + expected["v2"] = np.nan + expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5 + expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7 + + tm.assert_frame_equal(result, expected) + + result = result.sort_values(on_cols, kind="mergesort") + expected = left.join(right, on=on_cols, sort=True) + + tm.assert_frame_equal(result, expected) + + def test_left_join_index_multi_match_multiindex(self): + left = DataFrame( + [ + ["X", "Y", "C", "a"], + ["W", "Y", "C", "e"], + ["V", "Q", "A", "h"], + ["V", "R", "D", "i"], + ["X", "Y", "D", "b"], + ["X", "Y", "A", "c"], + ["W", "Q", "B", "f"], + ["W", "R", "C", "g"], + ["V", "Y", "C", "j"], + ["X", "Y", "B", "d"], + ], + columns=["cola", "colb", "colc", "tag"], + index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8], + ) + + right = DataFrame( + [ + ["W", "R", "C", 0], + ["W", "Q", "B", 3], + ["W", "Q", "B", 8], + ["X", "Y", "A", 1], + ["X", "Y", "A", 4], + ["X", "Y", "B", 5], + ["X", "Y", "C", 6], + ["X", "Y", "C", 9], + ["X", "Q", "C", -6], + ["X", "R", "C", -9], + ["V", "Y", "C", 7], + ["V", "R", "D", 2], + ["V", "R", "D", -1], + ["V", "Q", "A", -3], + ], + columns=["col1", "col2", "col3", "val"], + ).set_index(["col1", "col2", "col3"]) + + result = left.join(right, on=["cola", "colb", "colc"], how="left") + + expected = DataFrame( + [ + ["X", "Y", "C", "a", 6], + ["X", "Y", "C", "a", 9], + ["W", "Y", "C", "e", np.nan], + ["V", "Q", "A", "h", -3], + ["V", "R", "D", "i", 2], + ["V", "R", "D", "i", -1], + ["X", "Y", "D", "b", np.nan], + ["X", "Y", "A", "c", 1], + ["X", "Y", "A", "c", 4], + ["W", "Q", "B", "f", 3], + ["W", "Q", "B", "f", 8], + ["W", "R", "C", "g", 0], + ["V", "Y", "C", "j", 7], + ["X", "Y", "B", "d", 5], + ], + columns=["cola", "colb", "colc", "tag", "val"], + index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8], + ) + + tm.assert_frame_equal(result, expected) + + result = left.join(right, on=["cola", "colb", "colc"], how="left", sort=True) + + expected = expected.sort_values(["cola", "colb", "colc"], kind="mergesort") + + tm.assert_frame_equal(result, expected) + + def test_left_join_index_multi_match(self): + left = DataFrame( + [["c", 0], ["b", 1], ["a", 2], ["b", 3]], + columns=["tag", "val"], + index=[2, 0, 1, 3], + ) + + right = DataFrame( + [ + ["a", "v"], + ["c", "w"], + ["c", "x"], + ["d", "y"], + ["a", "z"], + ["c", "r"], + ["e", "q"], + ["c", "s"], + ], + columns=["tag", "char"], + ).set_index("tag") + + result = left.join(right, on="tag", how="left") + + expected = DataFrame( + [ + ["c", 0, "w"], + ["c", 0, "x"], + ["c", 0, "r"], + ["c", 0, "s"], + ["b", 1, np.nan], + ["a", 2, "v"], + ["a", 2, "z"], + ["b", 3, np.nan], + ], + columns=["tag", "val", "char"], + index=[2, 2, 2, 2, 0, 1, 1, 3], + ) + + tm.assert_frame_equal(result, expected) + + result = left.join(right, on="tag", how="left", sort=True) + expected2 = expected.sort_values("tag", 
kind="mergesort") + + tm.assert_frame_equal(result, expected2) + + # GH7331 - maintain left frame order in left merge + result = merge(left, right.reset_index(), how="left", on="tag") + expected.index = RangeIndex(len(expected)) + tm.assert_frame_equal(result, expected) + + def test_left_merge_na_buglet(self): + left = DataFrame( + { + "id": list("abcde"), + "v1": np.random.default_rng(2).standard_normal(5), + "v2": np.random.default_rng(2).standard_normal(5), + "dummy": list("abcde"), + "v3": np.random.default_rng(2).standard_normal(5), + }, + columns=["id", "v1", "v2", "dummy", "v3"], + ) + right = DataFrame( + { + "id": ["a", "b", np.nan, np.nan, np.nan], + "sv3": [1.234, 5.678, np.nan, np.nan, np.nan], + } + ) + + result = merge(left, right, on="id", how="left") + + rdf = right.drop(["id"], axis=1) + expected = left.join(rdf) + tm.assert_frame_equal(result, expected) + + def test_merge_na_keys(self): + data = [ + [1950, "A", 1.5], + [1950, "B", 1.5], + [1955, "B", 1.5], + [1960, "B", np.nan], + [1970, "B", 4.0], + [1950, "C", 4.0], + [1960, "C", np.nan], + [1965, "C", 3.0], + [1970, "C", 4.0], + ] + + frame = DataFrame(data, columns=["year", "panel", "data"]) + + other_data = [ + [1960, "A", np.nan], + [1970, "A", np.nan], + [1955, "A", np.nan], + [1965, "A", np.nan], + [1965, "B", np.nan], + [1955, "C", np.nan], + ] + other = DataFrame(other_data, columns=["year", "panel", "data"]) + + result = frame.merge(other, how="outer") + + expected = frame.fillna(-999).merge(other.fillna(-999), how="outer") + expected = expected.replace(-999, np.nan) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("klass", [None, np.asarray, Series, Index]) + def test_merge_datetime_index(self, klass): + # see gh-19038 + df = DataFrame( + [1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"] + ) + df.index = pd.to_datetime(df.index) + on_vector = df.index.year + + if klass is not None: + on_vector = klass(on_vector) + + exp_years = np.array([2016, 2017, 2018], dtype=np.int32) + expected = DataFrame({"a": [1, 2, 3], "key_1": exp_years}) + + result = df.merge(df, on=["a", on_vector], how="inner") + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"key_0": exp_years, "a_x": [1, 2, 3], "a_y": [1, 2, 3]}) + + result = df.merge(df, on=[df.index.year], how="inner") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("merge_type", ["left", "right"]) + def test_merge_datetime_multi_index_empty_df(self, merge_type): + # see gh-36895 + + left = DataFrame( + data={ + "data": [1.5, 1.5], + }, + index=MultiIndex.from_tuples( + [[Timestamp("1950-01-01"), "A"], [Timestamp("1950-01-02"), "B"]], + names=["date", "panel"], + ), + ) + + right = DataFrame( + index=MultiIndex.from_tuples([], names=["date", "panel"]), columns=["state"] + ) + + expected_index = MultiIndex.from_tuples( + [[Timestamp("1950-01-01"), "A"], [Timestamp("1950-01-02"), "B"]], + names=["date", "panel"], + ) + + if merge_type == "left": + expected = DataFrame( + data={ + "data": [1.5, 1.5], + "state": np.array([np.nan, np.nan], dtype=object), + }, + index=expected_index, + ) + results_merge = left.merge(right, how="left", on=["date", "panel"]) + results_join = left.join(right, how="left") + else: + expected = DataFrame( + data={ + "state": np.array([np.nan, np.nan], dtype=object), + "data": [1.5, 1.5], + }, + index=expected_index, + ) + results_merge = right.merge(left, how="right", on=["date", "panel"]) + results_join = right.join(left, how="right") + + 
tm.assert_frame_equal(results_merge, expected) + tm.assert_frame_equal(results_join, expected) + + @pytest.fixture + def household(self): + household = DataFrame( + { + "household_id": [1, 2, 3], + "male": [0, 1, 0], + "wealth": [196087.3, 316478.7, 294750], + }, + columns=["household_id", "male", "wealth"], + ).set_index("household_id") + return household + + @pytest.fixture + def portfolio(self): + portfolio = DataFrame( + { + "household_id": [1, 2, 2, 3, 3, 3, 4], + "asset_id": [ + "nl0000301109", + "nl0000289783", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "nl0000289965", + np.nan, + ], + "name": [ + "ABN Amro", + "Robeco", + "Royal Dutch Shell", + "Royal Dutch Shell", + "AAB Eastern Europe Equity Fund", + "Postbank BioTech Fonds", + np.nan, + ], + "share": [1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0], + }, + columns=["household_id", "asset_id", "name", "share"], + ).set_index(["household_id", "asset_id"]) + return portfolio + + @pytest.fixture + def expected(self): + expected = ( + DataFrame( + { + "male": [0, 1, 1, 0, 0, 0], + "wealth": [ + 196087.3, + 316478.7, + 316478.7, + 294750.0, + 294750.0, + 294750.0, + ], + "name": [ + "ABN Amro", + "Robeco", + "Royal Dutch Shell", + "Royal Dutch Shell", + "AAB Eastern Europe Equity Fund", + "Postbank BioTech Fonds", + ], + "share": [1.00, 0.40, 0.60, 0.15, 0.60, 0.25], + "household_id": [1, 2, 2, 3, 3, 3], + "asset_id": [ + "nl0000301109", + "nl0000289783", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "nl0000289965", + ], + } + ) + .set_index(["household_id", "asset_id"]) + .reindex(columns=["male", "wealth", "name", "share"]) + ) + return expected + + def test_join_multi_levels(self, portfolio, household, expected): + portfolio = portfolio.copy() + household = household.copy() + + # GH 3662 + # merge multi-levels + result = household.join(portfolio, how="inner") + tm.assert_frame_equal(result, expected) + + def test_join_multi_levels_merge_equivalence(self, portfolio, household, expected): + portfolio = portfolio.copy() + household = household.copy() + + # equivalency + result = merge( + household.reset_index(), + portfolio.reset_index(), + on=["household_id"], + how="inner", + ).set_index(["household_id", "asset_id"]) + tm.assert_frame_equal(result, expected) + + def test_join_multi_levels_outer(self, portfolio, household, expected): + portfolio = portfolio.copy() + household = household.copy() + + result = household.join(portfolio, how="outer") + expected = concat( + [ + expected, + ( + DataFrame( + {"share": [1.00]}, + index=MultiIndex.from_tuples( + [(4, np.nan)], names=["household_id", "asset_id"] + ), + ) + ), + ], + axis=0, + sort=True, + ).reindex(columns=expected.columns) + tm.assert_frame_equal(result, expected, check_index_type=False) + + def test_join_multi_levels_invalid(self, portfolio, household): + portfolio = portfolio.copy() + household = household.copy() + + # invalid cases + household.index.name = "foo" + + with pytest.raises( + ValueError, match="cannot join with no overlapping index names" + ): + household.join(portfolio, how="inner") + + portfolio2 = portfolio.copy() + portfolio2.index.set_names(["household_id", "foo"]) + + with pytest.raises(ValueError, match="columns overlap but no suffix specified"): + portfolio2.join(portfolio, how="inner") + + def test_join_multi_levels2(self): + # some more advanced merges + # GH6360 + household = DataFrame( + { + "household_id": [1, 2, 2, 3, 3, 3, 4], + "asset_id": [ + "nl0000301109", + "nl0000301109", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + 
"nl0000289965", + np.nan, + ], + "share": [1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0], + }, + columns=["household_id", "asset_id", "share"], + ).set_index(["household_id", "asset_id"]) + + log_return = DataFrame( + { + "asset_id": [ + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "lu0197800237", + ], + "t": [233, 234, 235, 180, 181], + "log_return": [ + 0.09604978, + -0.06524096, + 0.03532373, + 0.03025441, + 0.036997, + ], + } + ).set_index(["asset_id", "t"]) + + expected = ( + DataFrame( + { + "household_id": [2, 2, 2, 3, 3, 3, 3, 3], + "asset_id": [ + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "lu0197800237", + ], + "t": [233, 234, 235, 233, 234, 235, 180, 181], + "share": [0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6], + "log_return": [ + 0.09604978, + -0.06524096, + 0.03532373, + 0.09604978, + -0.06524096, + 0.03532373, + 0.03025441, + 0.036997, + ], + } + ) + .set_index(["household_id", "asset_id", "t"]) + .reindex(columns=["share", "log_return"]) + ) + + # this is the equivalency + result = merge( + household.reset_index(), + log_return.reset_index(), + on=["asset_id"], + how="inner", + ).set_index(["household_id", "asset_id", "t"]) + tm.assert_frame_equal(result, expected) + + expected = ( + DataFrame( + { + "household_id": [2, 2, 2, 3, 3, 3, 3, 3, 3, 1, 2, 4], + "asset_id": [ + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "gb00b03mlx29", + "lu0197800237", + "lu0197800237", + "nl0000289965", + "nl0000301109", + "nl0000301109", + None, + ], + "t": [ + 233, + 234, + 235, + 233, + 234, + 235, + 180, + 181, + None, + None, + None, + None, + ], + "share": [ + 0.6, + 0.6, + 0.6, + 0.15, + 0.15, + 0.15, + 0.6, + 0.6, + 0.25, + 1.0, + 0.4, + 1.0, + ], + "log_return": [ + 0.09604978, + -0.06524096, + 0.03532373, + 0.09604978, + -0.06524096, + 0.03532373, + 0.03025441, + 0.036997, + None, + None, + None, + None, + ], + } + ) + .set_index(["household_id", "asset_id", "t"]) + .reindex(columns=["share", "log_return"]) + ) + + result = merge( + household.reset_index(), + log_return.reset_index(), + on=["asset_id"], + how="outer", + ).set_index(["household_id", "asset_id", "t"]) + + tm.assert_frame_equal(result, expected) + + +class TestJoinMultiMulti: + def test_join_multi_multi(self, left_multi, right_multi, join_type, on_cols_multi): + left_names = left_multi.index.names + right_names = right_multi.index.names + if join_type == "right": + level_order = right_names + left_names.difference(right_names) + else: + level_order = left_names + right_names.difference(left_names) + # Multi-index join tests + expected = ( + merge( + left_multi.reset_index(), + right_multi.reset_index(), + how=join_type, + on=on_cols_multi, + ) + .set_index(level_order) + .sort_index() + ) + + result = left_multi.join(right_multi, how=join_type).sort_index() + tm.assert_frame_equal(result, expected) + + def test_join_multi_empty_frames( + self, left_multi, right_multi, join_type, on_cols_multi + ): + left_multi = left_multi.drop(columns=left_multi.columns) + right_multi = right_multi.drop(columns=right_multi.columns) + + left_names = left_multi.index.names + right_names = right_multi.index.names + if join_type == "right": + level_order = right_names + left_names.difference(right_names) + else: + level_order = left_names + right_names.difference(left_names) + + expected = ( + merge( + left_multi.reset_index(), + right_multi.reset_index(), + how=join_type, + on=on_cols_multi, + ) 
+ .set_index(level_order) + .sort_index() + ) + + result = left_multi.join(right_multi, how=join_type).sort_index() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("box", [None, np.asarray, Series, Index]) + def test_merge_datetime_index(self, box): + # see gh-19038 + df = DataFrame( + [1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"] + ) + df.index = pd.to_datetime(df.index) + on_vector = df.index.year + + if box is not None: + on_vector = box(on_vector) + + exp_years = np.array([2016, 2017, 2018], dtype=np.int32) + expected = DataFrame({"a": [1, 2, 3], "key_1": exp_years}) + + result = df.merge(df, on=["a", on_vector], how="inner") + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"key_0": exp_years, "a_x": [1, 2, 3], "a_y": [1, 2, 3]}) + + result = df.merge(df, on=[df.index.year], how="inner") + tm.assert_frame_equal(result, expected) + + def test_single_common_level(self): + index_left = MultiIndex.from_tuples( + [("K0", "X0"), ("K0", "X1"), ("K1", "X2")], names=["key", "X"] + ) + + left = DataFrame( + {"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=index_left + ) + + index_right = MultiIndex.from_tuples( + [("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")], names=["key", "Y"] + ) + + right = DataFrame( + {"C": ["C0", "C1", "C2", "C3"], "D": ["D0", "D1", "D2", "D3"]}, + index=index_right, + ) + + result = left.join(right) + expected = merge( + left.reset_index(), right.reset_index(), on=["key"], how="inner" + ).set_index(["key", "X", "Y"]) + + tm.assert_frame_equal(result, expected) + + def test_join_multi_wrong_order(self): + # GH 25760 + # GH 28956 + + midx1 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) + midx3 = MultiIndex.from_tuples([(4, 1), (3, 2), (3, 1)], names=["b", "a"]) + + left = DataFrame(index=midx1, data={"x": [10, 20, 30, 40]}) + right = DataFrame(index=midx3, data={"y": ["foo", "bar", "fing"]}) + + result = left.join(right) + + expected = DataFrame( + index=midx1, + data={"x": [10, 20, 30, 40], "y": ["fing", "foo", "bar", np.nan]}, + ) + + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_crosstab.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_crosstab.py new file mode 100644 index 0000000000000000000000000000000000000000..136e76986df9d86afbf5a6ca8e3650d7745dae3a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_crosstab.py @@ -0,0 +1,886 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalDtype, + CategoricalIndex, + DataFrame, + Index, + MultiIndex, + Series, + crosstab, +) +import pandas._testing as tm + + +@pytest.fixture +def df(): + df = DataFrame( + { + "A": [ + "foo", + "foo", + "foo", + "foo", + "bar", + "bar", + "bar", + "bar", + "foo", + "foo", + "foo", + ], + "B": [ + "one", + "one", + "one", + "two", + "one", + "one", + "one", + "two", + "two", + "two", + "one", + ], + "C": [ + "dull", + "dull", + "shiny", + "dull", + "dull", + "shiny", + "shiny", + "dull", + "shiny", + "shiny", + "shiny", + ], + "D": np.random.default_rng(2).standard_normal(11), + "E": np.random.default_rng(2).standard_normal(11), + "F": np.random.default_rng(2).standard_normal(11), + } + ) + + return pd.concat([df, df], ignore_index=True) + + +class TestCrosstab: + def test_crosstab_single(self, df): + result = crosstab(df["A"], df["C"]) + expected = df.groupby(["A", "C"]).size().unstack() + tm.assert_frame_equal(result, 
expected.fillna(0).astype(np.int64)) + + def test_crosstab_multiple(self, df): + result = crosstab(df["A"], [df["B"], df["C"]]) + expected = df.groupby(["A", "B", "C"]).size() + expected = expected.unstack("B").unstack("C").fillna(0).astype(np.int64) + tm.assert_frame_equal(result, expected) + + result = crosstab([df["B"], df["C"]], df["A"]) + expected = df.groupby(["B", "C", "A"]).size() + expected = expected.unstack("A").fillna(0).astype(np.int64) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("box", [np.array, list, tuple]) + def test_crosstab_ndarray(self, box): + # GH 44076 + a = box(np.random.default_rng(2).integers(0, 5, size=100)) + b = box(np.random.default_rng(2).integers(0, 3, size=100)) + c = box(np.random.default_rng(2).integers(0, 10, size=100)) + + df = DataFrame({"a": a, "b": b, "c": c}) + + result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c")) + expected = crosstab(df["a"], [df["b"], df["c"]]) + tm.assert_frame_equal(result, expected) + + result = crosstab([b, c], a, colnames=["a"], rownames=("b", "c")) + expected = crosstab([df["b"], df["c"]], df["a"]) + tm.assert_frame_equal(result, expected) + + # assign arbitrary names + result = crosstab(a, c) + expected = crosstab(df["a"], df["c"]) + expected.index.names = ["row_0"] + expected.columns.names = ["col_0"] + tm.assert_frame_equal(result, expected) + + def test_crosstab_non_aligned(self): + # GH 17005 + a = Series([0, 1, 1], index=["a", "b", "c"]) + b = Series([3, 4, 3, 4, 3], index=["a", "b", "c", "d", "f"]) + c = np.array([3, 4, 3], dtype=np.int64) + + expected = DataFrame( + [[1, 0], [1, 1]], + index=Index([0, 1], name="row_0"), + columns=Index([3, 4], name="col_0"), + ) + + result = crosstab(a, b) + tm.assert_frame_equal(result, expected) + + result = crosstab(a, c) + tm.assert_frame_equal(result, expected) + + def test_crosstab_margins(self): + a = np.random.default_rng(2).integers(0, 7, size=100) + b = np.random.default_rng(2).integers(0, 3, size=100) + c = np.random.default_rng(2).integers(0, 5, size=100) + + df = DataFrame({"a": a, "b": b, "c": c}) + + result = crosstab(a, [b, c], rownames=["a"], colnames=("b", "c"), margins=True) + + assert result.index.names == ("a",) + assert result.columns.names == ["b", "c"] + + all_cols = result["All", ""] + exp_cols = df.groupby(["a"]).size().astype("i8") + # to keep index.name + exp_margin = Series([len(df)], index=Index(["All"], name="a")) + exp_cols = pd.concat([exp_cols, exp_margin]) + exp_cols.name = ("All", "") + + tm.assert_series_equal(all_cols, exp_cols) + + all_rows = result.loc["All"] + exp_rows = df.groupby(["b", "c"]).size().astype("i8") + exp_rows = pd.concat([exp_rows, Series([len(df)], index=[("All", "")])]) + exp_rows.name = "All" + + exp_rows = exp_rows.reindex(all_rows.index) + exp_rows = exp_rows.fillna(0).astype(np.int64) + tm.assert_series_equal(all_rows, exp_rows) + + def test_crosstab_margins_set_margin_name(self): + # GH 15972 + a = np.random.default_rng(2).integers(0, 7, size=100) + b = np.random.default_rng(2).integers(0, 3, size=100) + c = np.random.default_rng(2).integers(0, 5, size=100) + + df = DataFrame({"a": a, "b": b, "c": c}) + + result = crosstab( + a, + [b, c], + rownames=["a"], + colnames=("b", "c"), + margins=True, + margins_name="TOTAL", + ) + + assert result.index.names == ("a",) + assert result.columns.names == ["b", "c"] + + all_cols = result["TOTAL", ""] + exp_cols = df.groupby(["a"]).size().astype("i8") + # to keep index.name + exp_margin = Series([len(df)], index=Index(["TOTAL"], name="a")) + 
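+ # append the "TOTAL" margin row so the expected column lines up with the crosstab output 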
exp_cols = pd.concat([exp_cols, exp_margin]) + exp_cols.name = ("TOTAL", "") + + tm.assert_series_equal(all_cols, exp_cols) + + all_rows = result.loc["TOTAL"] + exp_rows = df.groupby(["b", "c"]).size().astype("i8") + exp_rows = pd.concat([exp_rows, Series([len(df)], index=[("TOTAL", "")])]) + exp_rows.name = "TOTAL" + + exp_rows = exp_rows.reindex(all_rows.index) + exp_rows = exp_rows.fillna(0).astype(np.int64) + tm.assert_series_equal(all_rows, exp_rows) + + msg = "margins_name argument must be a string" + for margins_name in [666, None, ["a", "b"]]: + with pytest.raises(ValueError, match=msg): + crosstab( + a, + [b, c], + rownames=["a"], + colnames=("b", "c"), + margins=True, + margins_name=margins_name, + ) + + def test_crosstab_pass_values(self): + a = np.random.default_rng(2).integers(0, 7, size=100) + b = np.random.default_rng(2).integers(0, 3, size=100) + c = np.random.default_rng(2).integers(0, 5, size=100) + values = np.random.default_rng(2).standard_normal(100) + + table = crosstab( + [a, b], c, values, aggfunc="sum", rownames=["foo", "bar"], colnames=["baz"] + ) + + df = DataFrame({"foo": a, "bar": b, "baz": c, "values": values}) + + expected = df.pivot_table( + "values", index=["foo", "bar"], columns="baz", aggfunc="sum" + ) + tm.assert_frame_equal(table, expected) + + def test_crosstab_dropna(self): + # GH 3820 + a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object) + b = np.array(["one", "one", "two", "one", "two", "two", "two"], dtype=object) + c = np.array( + ["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object + ) + res = crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"], dropna=False) + m = MultiIndex.from_tuples( + [("one", "dull"), ("one", "shiny"), ("two", "dull"), ("two", "shiny")], + names=["b", "c"], + ) + tm.assert_index_equal(res.columns, m) + + def test_crosstab_no_overlap(self): + # GH 10291 + + s1 = Series([1, 2, 3], index=[1, 2, 3]) + s2 = Series([4, 5, 6], index=[4, 5, 6]) + + actual = crosstab(s1, s2) + expected = DataFrame( + index=Index([], dtype="int64", name="row_0"), + columns=Index([], dtype="int64", name="col_0"), + ) + + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna(self): + # GH 12577 + # pivot_table counts null into margin ('All') + # when margins=True and dropna=True + + df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]}) + actual = crosstab(df.a, df.b, margins=True, dropna=True) + expected = DataFrame([[1, 0, 1], [1, 3, 4], [2, 3, 5]]) + expected.index = Index([1.0, 2.0, "All"], name="a") + expected.columns = Index([3, 4, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna2(self): + df = DataFrame( + {"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]} + ) + actual = crosstab(df.a, df.b, margins=True, dropna=True) + expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]]) + expected.index = Index([1.0, 2.0, "All"], name="a") + expected.columns = Index([3.0, 4.0, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna3(self): + df = DataFrame( + {"a": [1, np.nan, np.nan, np.nan, np.nan, 2], "b": [3, 3, 4, 4, 4, 4]} + ) + actual = crosstab(df.a, df.b, margins=True, dropna=True) + expected = DataFrame([[1, 0, 1], [0, 1, 1], [1, 1, 2]]) + expected.index = Index([1.0, 2.0, "All"], name="a") + expected.columns = Index([3, 4, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna4(self): + # GH 12642 + # _add_margins raises KeyError: Level None 
not found + # when margins=True and dropna=False + # GH: 10772: Keep np.nan in result with dropna=False + df = DataFrame({"a": [1, 2, 2, 2, 2, np.nan], "b": [3, 3, 4, 4, 4, 4]}) + actual = crosstab(df.a, df.b, margins=True, dropna=False) + expected = DataFrame([[1, 0, 1.0], [1, 3, 4.0], [0, 1, np.nan], [2, 4, 6.0]]) + expected.index = Index([1.0, 2.0, np.nan, "All"], name="a") + expected.columns = Index([3, 4, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna5(self): + # GH: 10772: Keep np.nan in result with dropna=False + df = DataFrame( + {"a": [1, np.nan, np.nan, np.nan, 2, np.nan], "b": [3, np.nan, 4, 4, 4, 4]} + ) + actual = crosstab(df.a, df.b, margins=True, dropna=False) + expected = DataFrame( + [[1, 0, 0, 1.0], [0, 1, 0, 1.0], [0, 3, 1, np.nan], [1, 4, 0, 6.0]] + ) + expected.index = Index([1.0, 2.0, np.nan, "All"], name="a") + expected.columns = Index([3.0, 4.0, np.nan, "All"], name="b") + tm.assert_frame_equal(actual, expected) + + def test_margin_dropna6(self): + # GH: 10772: Keep np.nan in result with dropna=False + a = np.array(["foo", "foo", "foo", "bar", "bar", "foo", "foo"], dtype=object) + b = np.array(["one", "one", "two", "one", "two", np.nan, "two"], dtype=object) + c = np.array( + ["dull", "dull", "dull", "dull", "dull", "shiny", "shiny"], dtype=object + ) + + actual = crosstab( + a, [b, c], rownames=["a"], colnames=["b", "c"], margins=True, dropna=False + ) + m = MultiIndex.from_arrays( + [ + ["one", "one", "two", "two", np.nan, np.nan, "All"], + ["dull", "shiny", "dull", "shiny", "dull", "shiny", ""], + ], + names=["b", "c"], + ) + expected = DataFrame( + [[1, 0, 1, 0, 0, 0, 2], [2, 0, 1, 1, 0, 1, 5], [3, 0, 2, 1, 0, 0, 7]], + columns=m, + ) + expected.index = Index(["bar", "foo", "All"], name="a") + tm.assert_frame_equal(actual, expected) + + actual = crosstab( + [a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=False + ) + m = MultiIndex.from_arrays( + [ + ["bar", "bar", "bar", "foo", "foo", "foo", "All"], + ["one", "two", np.nan, "one", "two", np.nan, ""], + ], + names=["a", "b"], + ) + expected = DataFrame( + [ + [1, 0, 1.0], + [1, 0, 1.0], + [0, 0, np.nan], + [2, 0, 2.0], + [1, 1, 2.0], + [0, 1, np.nan], + [5, 2, 7.0], + ], + index=m, + ) + expected.columns = Index(["dull", "shiny", "All"], name="c") + tm.assert_frame_equal(actual, expected) + + actual = crosstab( + [a, b], c, rownames=["a", "b"], colnames=["c"], margins=True, dropna=True + ) + m = MultiIndex.from_arrays( + [["bar", "bar", "foo", "foo", "All"], ["one", "two", "one", "two", ""]], + names=["a", "b"], + ) + expected = DataFrame( + [[1, 0, 1], [1, 0, 1], [2, 0, 2], [1, 1, 2], [5, 1, 6]], index=m + ) + expected.columns = Index(["dull", "shiny", "All"], name="c") + tm.assert_frame_equal(actual, expected) + + def test_crosstab_normalize(self): + # Issue 12578 + df = DataFrame( + {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]} + ) + + rindex = Index([1, 2], name="a") + cindex = Index([3, 4], name="b") + full_normal = DataFrame([[0.2, 0], [0.2, 0.6]], index=rindex, columns=cindex) + row_normal = DataFrame([[1.0, 0], [0.25, 0.75]], index=rindex, columns=cindex) + col_normal = DataFrame([[0.5, 0], [0.5, 1.0]], index=rindex, columns=cindex) + + # Check all normalize args + tm.assert_frame_equal(crosstab(df.a, df.b, normalize="all"), full_normal) + tm.assert_frame_equal(crosstab(df.a, df.b, normalize=True), full_normal) + tm.assert_frame_equal(crosstab(df.a, df.b, normalize="index"), row_normal) + 
tm.assert_frame_equal(crosstab(df.a, df.b, normalize="columns"), col_normal) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize=1), + crosstab(df.a, df.b, normalize="columns"), + ) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize=0), crosstab(df.a, df.b, normalize="index") + ) + + row_normal_margins = DataFrame( + [[1.0, 0], [0.25, 0.75], [0.4, 0.6]], + index=Index([1, 2, "All"], name="a", dtype="object"), + columns=Index([3, 4], name="b", dtype="object"), + ) + col_normal_margins = DataFrame( + [[0.5, 0, 0.2], [0.5, 1.0, 0.8]], + index=Index([1, 2], name="a", dtype="object"), + columns=Index([3, 4, "All"], name="b", dtype="object"), + ) + + all_normal_margins = DataFrame( + [[0.2, 0, 0.2], [0.2, 0.6, 0.8], [0.4, 0.6, 1]], + index=Index([1, 2, "All"], name="a", dtype="object"), + columns=Index([3, 4, "All"], name="b", dtype="object"), + ) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize="index", margins=True), row_normal_margins + ) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize="columns", margins=True), col_normal_margins + ) + tm.assert_frame_equal( + crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins + ) + + def test_crosstab_normalize_arrays(self): + # GH#12578 + df = DataFrame( + {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]} + ) + + # Test arrays + crosstab( + [np.array([1, 1, 2, 2]), np.array([1, 2, 1, 2])], np.array([1, 2, 1, 2]) + ) + + # Test with aggfunc + norm_counts = DataFrame( + [[0.25, 0, 0.25], [0.25, 0.5, 0.75], [0.5, 0.5, 1]], + index=Index([1, 2, "All"], name="a", dtype="object"), + columns=Index([3, 4, "All"], name="b"), + ) + test_case = crosstab( + df.a, df.b, df.c, aggfunc="count", normalize="all", margins=True + ) + tm.assert_frame_equal(test_case, norm_counts) + + df = DataFrame( + {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [0, 4, np.nan, 3, 3]} + ) + + norm_sum = DataFrame( + [[0, 0, 0.0], [0.4, 0.6, 1], [0.4, 0.6, 1]], + index=Index([1, 2, "All"], name="a", dtype="object"), + columns=Index([3, 4, "All"], name="b", dtype="object"), + ) + msg = "using DataFrameGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + test_case = crosstab( + df.a, df.b, df.c, aggfunc=np.sum, normalize="all", margins=True + ) + tm.assert_frame_equal(test_case, norm_sum) + + def test_crosstab_with_empties(self, using_array_manager): + # Check handling of empties + df = DataFrame( + { + "a": [1, 2, 2, 2, 2], + "b": [3, 3, 4, 4, 4], + "c": [np.nan, np.nan, np.nan, np.nan, np.nan], + } + ) + + empty = DataFrame( + [[0.0, 0.0], [0.0, 0.0]], + index=Index([1, 2], name="a", dtype="int64"), + columns=Index([3, 4], name="b"), + ) + + for i in [True, "index", "columns"]: + calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=i) + tm.assert_frame_equal(empty, calculated) + + nans = DataFrame( + [[0.0, np.nan], [0.0, 0.0]], + index=Index([1, 2], name="a", dtype="int64"), + columns=Index([3, 4], name="b"), + ) + if using_array_manager: + # INFO(ArrayManager) column without NaNs can preserve int dtype + nans[3] = nans[3].astype("int64") + + calculated = crosstab(df.a, df.b, values=df.c, aggfunc="count", normalize=False) + tm.assert_frame_equal(nans, calculated) + + def test_crosstab_errors(self): + # Issue 12578 + + df = DataFrame( + {"a": [1, 2, 2, 2, 2], "b": [3, 3, 4, 4, 4], "c": [1, 1, np.nan, 1, 1]} + ) + + error = "values cannot be used without an aggfunc." 
+ with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, values=df.c) + + error = "aggfunc cannot be used without values" + with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, aggfunc=np.mean) + + error = "Not a valid normalize argument" + with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, normalize="42") + + with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, normalize=42) + + error = "Not a valid margins argument" + with pytest.raises(ValueError, match=error): + crosstab(df.a, df.b, normalize="all", margins=42) + + def test_crosstab_with_categorial_columns(self): + # GH 8860 + df = DataFrame( + { + "MAKE": ["Honda", "Acura", "Tesla", "Honda", "Honda", "Acura"], + "MODEL": ["Sedan", "Sedan", "Electric", "Pickup", "Sedan", "Sedan"], + } + ) + categories = ["Sedan", "Electric", "Pickup"] + df["MODEL"] = df["MODEL"].astype("category").cat.set_categories(categories) + result = crosstab(df["MAKE"], df["MODEL"]) + + expected_index = Index(["Acura", "Honda", "Tesla"], name="MAKE") + expected_columns = CategoricalIndex( + categories, categories=categories, ordered=False, name="MODEL" + ) + expected_data = [[2, 0, 0], [2, 0, 1], [0, 1, 0]] + expected = DataFrame( + expected_data, index=expected_index, columns=expected_columns + ) + tm.assert_frame_equal(result, expected) + + def test_crosstab_with_numpy_size(self): + # GH 4003 + df = DataFrame( + { + "A": ["one", "one", "two", "three"] * 6, + "B": ["A", "B", "C"] * 8, + "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4, + "D": np.random.default_rng(2).standard_normal(24), + "E": np.random.default_rng(2).standard_normal(24), + } + ) + result = crosstab( + index=[df["A"], df["B"]], + columns=[df["C"]], + margins=True, + aggfunc=np.size, + values=df["D"], + ) + expected_index = MultiIndex( + levels=[["All", "one", "three", "two"], ["", "A", "B", "C"]], + codes=[[1, 1, 1, 2, 2, 2, 3, 3, 3, 0], [1, 2, 3, 1, 2, 3, 1, 2, 3, 0]], + names=["A", "B"], + ) + expected_column = Index(["bar", "foo", "All"], name="C") + expected_data = np.array( + [ + [2.0, 2.0, 4.0], + [2.0, 2.0, 4.0], + [2.0, 2.0, 4.0], + [2.0, np.nan, 2.0], + [np.nan, 2.0, 2.0], + [2.0, np.nan, 2.0], + [np.nan, 2.0, 2.0], + [2.0, np.nan, 2.0], + [np.nan, 2.0, 2.0], + [12.0, 12.0, 24.0], + ] + ) + expected = DataFrame( + expected_data, index=expected_index, columns=expected_column + ) + # aggfunc is np.size, resulting in integers + expected["All"] = expected["All"].astype("int64") + tm.assert_frame_equal(result, expected) + + def test_crosstab_duplicate_names(self): + # GH 13279 / 22529 + + s1 = Series(range(3), name="foo") + s2_foo = Series(range(1, 4), name="foo") + s2_bar = Series(range(1, 4), name="bar") + s3 = Series(range(3), name="waldo") + + # check result computed with duplicate labels against + # result computed with unique labels, then relabelled + mapper = {"bar": "foo"} + + # duplicate row, column labels + result = crosstab(s1, s2_foo) + expected = crosstab(s1, s2_bar).rename_axis(columns=mapper, axis=1) + tm.assert_frame_equal(result, expected) + + # duplicate row, unique column labels + result = crosstab([s1, s2_foo], s3) + expected = crosstab([s1, s2_bar], s3).rename_axis(index=mapper, axis=0) + tm.assert_frame_equal(result, expected) + + # unique row, duplicate column labels + result = crosstab(s3, [s1, s2_foo]) + expected = crosstab(s3, [s1, s2_bar]).rename_axis(columns=mapper, axis=1) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("names", [["a", ("b", "c")], [("a", "b"), "c"]]) + def 
test_crosstab_tuple_name(self, names): + s1 = Series(range(3), name=names[0]) + s2 = Series(range(1, 4), name=names[1]) + + mi = MultiIndex.from_arrays([range(3), range(1, 4)], names=names) + expected = Series(1, index=mi).unstack(1, fill_value=0) + + result = crosstab(s1, s2) + tm.assert_frame_equal(result, expected) + + def test_crosstab_both_tuple_names(self): + # GH 18321 + s1 = Series(range(3), name=("a", "b")) + s2 = Series(range(3), name=("c", "d")) + + expected = DataFrame( + np.eye(3, dtype="int64"), + index=Index(range(3), name=("a", "b")), + columns=Index(range(3), name=("c", "d")), + ) + result = crosstab(s1, s2) + tm.assert_frame_equal(result, expected) + + def test_crosstab_unsorted_order(self): + df = DataFrame({"b": [3, 1, 2], "a": [5, 4, 6]}, index=["C", "A", "B"]) + result = crosstab(df.index, [df.b, df.a]) + e_idx = Index(["A", "B", "C"], name="row_0") + e_columns = MultiIndex.from_tuples([(1, 4), (2, 6), (3, 5)], names=["b", "a"]) + expected = DataFrame( + [[1, 0, 0], [0, 1, 0], [0, 0, 1]], index=e_idx, columns=e_columns + ) + tm.assert_frame_equal(result, expected) + + def test_crosstab_normalize_multiple_columns(self): + # GH 15150 + df = DataFrame( + { + "A": ["one", "one", "two", "three"] * 6, + "B": ["A", "B", "C"] * 8, + "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4, + "D": [0] * 24, + "E": [0] * 24, + } + ) + + msg = "using DataFrameGroupBy.sum" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = crosstab( + [df.A, df.B], + df.C, + values=df.D, + aggfunc=np.sum, + normalize=True, + margins=True, + ) + expected = DataFrame( + np.array([0] * 29 + [1], dtype=float).reshape(10, 3), + columns=Index(["bar", "foo", "All"], name="C"), + index=MultiIndex.from_tuples( + [ + ("one", "A"), + ("one", "B"), + ("one", "C"), + ("three", "A"), + ("three", "B"), + ("three", "C"), + ("two", "A"), + ("two", "B"), + ("two", "C"), + ("All", ""), + ], + names=["A", "B"], + ), + ) + tm.assert_frame_equal(result, expected) + + def test_margin_normalize(self): + # GH 27500 + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + # normalize on index + result = crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=0 + ) + expected = DataFrame( + [[0.5, 0.5], [0.5, 0.5], [0.666667, 0.333333], [0, 1], [0.444444, 0.555556]] + ) + expected.index = MultiIndex( + levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]], + codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + expected.columns = Index(["large", "small"], name="C") + tm.assert_frame_equal(result, expected) + + # normalize on columns + result = crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=1 + ) + expected = DataFrame( + [ + [0.25, 0.2, 0.222222], + [0.25, 0.2, 0.222222], + [0.5, 0.2, 0.333333], + [0, 0.4, 0.222222], + ] + ) + expected.columns = Index(["large", "small", "Sub-Total"], name="C") + expected.index = MultiIndex( + levels=[["bar", "foo"], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=["A", "B"], + ) + tm.assert_frame_equal(result, expected) + + # normalize on both index and column + result = crosstab( + [df.A, df.B], df.C, margins=True, margins_name="Sub-Total", normalize=True + ) + expected = DataFrame( 
+ [ + [0.111111, 0.111111, 0.222222], + [0.111111, 0.111111, 0.222222], + [0.222222, 0.111111, 0.333333], + [0.000000, 0.222222, 0.222222], + [0.444444, 0.555555, 1], + ] + ) + expected.columns = Index(["large", "small", "Sub-Total"], name="C") + expected.index = MultiIndex( + levels=[["Sub-Total", "bar", "foo"], ["", "one", "two"]], + codes=[[1, 1, 2, 2, 0], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + tm.assert_frame_equal(result, expected) + + def test_margin_normalize_multiple_columns(self): + # GH 35144 + # use multiple columns with margins and normalization + df = DataFrame( + { + "A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"], + "B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"], + "C": [ + "small", + "large", + "large", + "small", + "small", + "large", + "small", + "small", + "large", + ], + "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], + "E": [2, 4, 5, 5, 6, 6, 8, 9, 9], + } + ) + result = crosstab( + index=df.C, + columns=[df.A, df.B], + margins=True, + margins_name="margin", + normalize=True, + ) + expected = DataFrame( + [ + [0.111111, 0.111111, 0.222222, 0.000000, 0.444444], + [0.111111, 0.111111, 0.111111, 0.222222, 0.555556], + [0.222222, 0.222222, 0.333333, 0.222222, 1.0], + ], + index=["large", "small", "margin"], + ) + expected.columns = MultiIndex( + levels=[["bar", "foo", "margin"], ["", "one", "two"]], + codes=[[0, 0, 1, 1, 2], [1, 2, 1, 2, 0]], + names=["A", "B"], + ) + expected.index.name = "C" + tm.assert_frame_equal(result, expected) + + def test_margin_support_Float(self): + # GH 50313 + # use Float64 formats and function aggfunc with margins + df = DataFrame( + {"A": [1, 2, 2, 1], "B": [3, 3, 4, 5], "C": [-1.0, 10.0, 1.0, 10.0]}, + dtype="Float64", + ) + result = crosstab( + df["A"], + df["B"], + values=df["C"], + aggfunc="sum", + margins=True, + ) + expected = DataFrame( + [ + [-1.0, pd.NA, 10.0, 9.0], + [10.0, 1.0, pd.NA, 11.0], + [9.0, 1.0, 10.0, 20.0], + ], + index=Index([1.0, 2.0, "All"], dtype="object", name="A"), + columns=Index([3.0, 4.0, 5.0, "All"], dtype="object", name="B"), + dtype="Float64", + ) + tm.assert_frame_equal(result, expected) + + def test_margin_with_ordered_categorical_column(self): + # GH 25278 + df = DataFrame( + { + "First": ["B", "B", "C", "A", "B", "C"], + "Second": ["C", "B", "B", "B", "C", "A"], + } + ) + df["First"] = df["First"].astype(CategoricalDtype(ordered=True)) + customized_categories_order = ["C", "A", "B"] + df["First"] = df["First"].cat.reorder_categories(customized_categories_order) + result = crosstab(df["First"], df["Second"], margins=True) + + expected_index = Index(["C", "A", "B", "All"], name="First") + expected_columns = Index(["A", "B", "C", "All"], name="Second") + expected_data = [[1, 1, 0, 2], [0, 1, 0, 1], [0, 1, 2, 3], [1, 3, 2, 6]] + expected = DataFrame( + expected_data, index=expected_index, columns=expected_columns + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("a_dtype", ["category", "int64"]) +@pytest.mark.parametrize("b_dtype", ["category", "int64"]) +def test_categoricals(a_dtype, b_dtype): + # https://github.com/pandas-dev/pandas/issues/37465 + g = np.random.default_rng(2) + a = Series(g.integers(0, 3, size=100)).astype(a_dtype) + b = Series(g.integers(0, 2, size=100)).astype(b_dtype) + result = crosstab(a, b, margins=True, dropna=False) + columns = Index([0, 1, "All"], dtype="object", name="col_0") + index = Index([0, 1, 2, "All"], dtype="object", name="row_0") + values = [[10, 18, 28], [23, 16, 39], [17, 16, 33], [50, 50, 100]] + expected = 
DataFrame(values, index, columns) + tm.assert_frame_equal(result, expected) + + # Verify when categorical does not have all values present + a.loc[a == 1] = 2 + a_is_cat = isinstance(a.dtype, CategoricalDtype) + assert not a_is_cat or a.value_counts().loc[1] == 0 + result = crosstab(a, b, margins=True, dropna=False) + values = [[10, 18, 28], [0, 0, 0], [40, 32, 72], [50, 50, 100]] + expected = DataFrame(values, index, columns) + if not a_is_cat: + expected = expected.loc[[0, 2, "All"]] + expected["All"] = expected["All"].astype("int64") + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_cut.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_cut.py new file mode 100644 index 0000000000000000000000000000000000000000..0811c69859c0dadf9b3b909437d9a6a16a584c24 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_cut.py @@ -0,0 +1,791 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + Interval, + IntervalIndex, + Series, + TimedeltaIndex, + Timestamp, + cut, + date_range, + interval_range, + isna, + qcut, + timedelta_range, + to_datetime, +) +import pandas._testing as tm +from pandas.api.types import CategoricalDtype +import pandas.core.reshape.tile as tmod + + +def test_simple(): + data = np.ones(5, dtype="int64") + result = cut(data, 4, labels=False) + + expected = np.array([1, 1, 1, 1, 1]) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + +@pytest.mark.parametrize("func", [list, np.array]) +def test_bins(func): + data = func([0.2, 1.4, 2.5, 6.2, 9.7, 2.1]) + result, bins = cut(data, 3, retbins=True) + + intervals = IntervalIndex.from_breaks(bins.round(3)) + intervals = intervals.take([0, 0, 0, 1, 2, 0]) + expected = Categorical(intervals, ordered=True) + + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.1905, 3.36666667, 6.53333333, 9.7])) + + +def test_right(): + data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) + result, bins = cut(data, 4, right=True, retbins=True) + + intervals = IntervalIndex.from_breaks(bins.round(3)) + expected = Categorical(intervals, ordered=True) + expected = expected.take([0, 0, 0, 2, 3, 0, 0]) + + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.1905, 2.575, 4.95, 7.325, 9.7])) + + +def test_no_right(): + data = np.array([0.2, 1.4, 2.5, 6.2, 9.7, 2.1, 2.575]) + result, bins = cut(data, 4, right=False, retbins=True) + + intervals = IntervalIndex.from_breaks(bins.round(3), closed="left") + intervals = intervals.take([0, 0, 0, 2, 3, 0, 1]) + expected = Categorical(intervals, ordered=True) + + tm.assert_categorical_equal(result, expected) + tm.assert_almost_equal(bins, np.array([0.2, 2.575, 4.95, 7.325, 9.7095])) + + +def test_bins_from_interval_index(): + c = cut(range(5), 3) + expected = c + result = cut(range(5), bins=expected.categories) + tm.assert_categorical_equal(result, expected) + + expected = Categorical.from_codes( + np.append(c.codes, -1), categories=c.categories, ordered=True + ) + result = cut(range(6), bins=expected.categories) + tm.assert_categorical_equal(result, expected) + + +def test_bins_from_interval_index_doc_example(): + # Make sure we preserve the bins. 
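+ # (cutting new data with c.categories must reuse exactly those intervals) 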
+ ages = np.array([10, 15, 13, 12, 23, 25, 28, 59, 60]) + c = cut(ages, bins=[0, 18, 35, 70]) + expected = IntervalIndex.from_tuples([(0, 18), (18, 35), (35, 70)]) + tm.assert_index_equal(c.categories, expected) + + result = cut([25, 20, 50], bins=c.categories) + tm.assert_index_equal(result.categories, expected) + tm.assert_numpy_array_equal(result.codes, np.array([1, 1, 2], dtype="int8")) + + +def test_bins_not_overlapping_from_interval_index(): + # see gh-23980 + msg = "Overlapping IntervalIndex is not accepted" + ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)]) + + with pytest.raises(ValueError, match=msg): + cut([5, 6], bins=ii) + + +def test_bins_not_monotonic(): + msg = "bins must increase monotonically" + data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1] + + with pytest.raises(ValueError, match=msg): + cut(data, [0.1, 1.5, 1, 10]) + + +@pytest.mark.parametrize( + "x, bins, expected", + [ + ( + date_range("2017-12-31", periods=3), + [Timestamp.min, Timestamp("2018-01-01"), Timestamp.max], + IntervalIndex.from_tuples( + [ + (Timestamp.min, Timestamp("2018-01-01")), + (Timestamp("2018-01-01"), Timestamp.max), + ] + ), + ), + ( + [-1, 0, 1], + np.array( + [np.iinfo(np.int64).min, 0, np.iinfo(np.int64).max], dtype="int64" + ), + IntervalIndex.from_tuples( + [(np.iinfo(np.int64).min, 0), (0, np.iinfo(np.int64).max)] + ), + ), + ( + [ + np.timedelta64(-1, "ns"), + np.timedelta64(0, "ns"), + np.timedelta64(1, "ns"), + ], + np.array( + [ + np.timedelta64(-np.iinfo(np.int64).max, "ns"), + np.timedelta64(0, "ns"), + np.timedelta64(np.iinfo(np.int64).max, "ns"), + ] + ), + IntervalIndex.from_tuples( + [ + ( + np.timedelta64(-np.iinfo(np.int64).max, "ns"), + np.timedelta64(0, "ns"), + ), + ( + np.timedelta64(0, "ns"), + np.timedelta64(np.iinfo(np.int64).max, "ns"), + ), + ] + ), + ), + ], +) +def test_bins_monotonic_not_overflowing(x, bins, expected): + # GH 26045 + result = cut(x, bins) + tm.assert_index_equal(result.categories, expected) + + +def test_wrong_num_labels(): + msg = "Bin labels must be one fewer than the number of bin edges" + data = [0.2, 1.4, 2.5, 6.2, 9.7, 2.1] + + with pytest.raises(ValueError, match=msg): + cut(data, [0, 1, 10], labels=["foo", "bar", "baz"]) + + +@pytest.mark.parametrize( + "x,bins,msg", + [ + ([], 2, "Cannot cut empty array"), + ([1, 2, 3], 0.5, "`bins` should be a positive integer"), + ], +) +def test_cut_corner(x, bins, msg): + with pytest.raises(ValueError, match=msg): + cut(x, bins) + + +@pytest.mark.parametrize("arg", [2, np.eye(2), DataFrame(np.eye(2))]) +@pytest.mark.parametrize("cut_func", [cut, qcut]) +def test_cut_not_1d_arg(arg, cut_func): + msg = "Input array must be 1 dimensional" + with pytest.raises(ValueError, match=msg): + cut_func(arg, 2) + + +@pytest.mark.parametrize( + "data", + [ + [0, 1, 2, 3, 4, np.inf], + [-np.inf, 0, 1, 2, 3, 4], + [-np.inf, 0, 1, 2, 3, 4, np.inf], + ], +) +def test_int_bins_with_inf(data): + # GH 24314 + msg = "cannot specify integer `bins` when input data contains infinity" + with pytest.raises(ValueError, match=msg): + cut(data, bins=3) + + +def test_cut_out_of_range_more(): + # see gh-1511 + name = "x" + + ser = Series([0, -1, 0, 1, -3], name=name) + ind = cut(ser, [0, 1], labels=False) + + exp = Series([np.nan, np.nan, np.nan, 0, np.nan], name=name) + tm.assert_series_equal(ind, exp) + + +@pytest.mark.parametrize( + "right,breaks,closed", + [ + (True, [-1e-3, 0.25, 0.5, 0.75, 1], "right"), + (False, [0, 0.25, 0.5, 0.75, 1 + 1e-3], "left"), + ], +) +def test_labels(right, breaks, closed): + arr = 
np.tile(np.arange(0, 1.01, 0.1), 4) + + result, bins = cut(arr, 4, retbins=True, right=right) + ex_levels = IntervalIndex.from_breaks(breaks, closed=closed) + tm.assert_index_equal(result.categories, ex_levels) + + +def test_cut_pass_series_name_to_factor(): + name = "foo" + ser = Series(np.random.default_rng(2).standard_normal(100), name=name) + + factor = cut(ser, 4) + assert factor.name == name + + +def test_label_precision(): + arr = np.arange(0, 0.73, 0.01) + result = cut(arr, 4, precision=2) + + ex_levels = IntervalIndex.from_breaks([-0.00072, 0.18, 0.36, 0.54, 0.72]) + tm.assert_index_equal(result.categories, ex_levels) + + +@pytest.mark.parametrize("labels", [None, False]) +def test_na_handling(labels): + arr = np.arange(0, 0.75, 0.01) + arr[::3] = np.nan + + result = cut(arr, 4, labels=labels) + result = np.asarray(result) + + expected = np.where(isna(arr), np.nan, result) + tm.assert_almost_equal(result, expected) + + +def test_inf_handling(): + data = np.arange(6) + data_ser = Series(data, dtype="int64") + + bins = [-np.inf, 2, 4, np.inf] + result = cut(data, bins) + result_ser = cut(data_ser, bins) + + ex_uniques = IntervalIndex.from_breaks(bins) + tm.assert_index_equal(result.categories, ex_uniques) + + assert result[5] == Interval(4, np.inf) + assert result[0] == Interval(-np.inf, 2) + assert result_ser[5] == Interval(4, np.inf) + assert result_ser[0] == Interval(-np.inf, 2) + + +def test_cut_out_of_bounds(): + arr = np.random.default_rng(2).standard_normal(100) + result = cut(arr, [-1, 0, 1]) + + mask = isna(result) + ex_mask = (arr < -1) | (arr > 1) + tm.assert_numpy_array_equal(mask, ex_mask) + + +@pytest.mark.parametrize( + "get_labels,get_expected", + [ + ( + lambda labels: labels, + lambda labels: Categorical( + ["Medium"] + 4 * ["Small"] + ["Medium", "Large"], + categories=labels, + ordered=True, + ), + ), + ( + lambda labels: Categorical.from_codes([0, 1, 2], labels), + lambda labels: Categorical.from_codes([1] + 4 * [0] + [1, 2], labels), + ), + ], +) +def test_cut_pass_labels(get_labels, get_expected): + bins = [0, 25, 50, 100] + arr = [50, 5, 10, 15, 20, 30, 70] + labels = ["Small", "Medium", "Large"] + + result = cut(arr, bins, labels=get_labels(labels)) + tm.assert_categorical_equal(result, get_expected(labels)) + + +def test_cut_pass_labels_compat(): + # see gh-16459 + arr = [50, 5, 10, 15, 20, 30, 70] + labels = ["Good", "Medium", "Bad"] + + result = cut(arr, 3, labels=labels) + exp = cut(arr, 3, labels=Categorical(labels, categories=labels, ordered=True)) + tm.assert_categorical_equal(result, exp) + + +@pytest.mark.parametrize("x", [np.arange(11.0), np.arange(11.0) / 1e10]) +def test_round_frac_just_works(x): + # It works. 
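+ # (smoke test: cut(x, 2) should not raise for unit-scale or 1e-10-scale floats) 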
+ cut(x, 2) + + +@pytest.mark.parametrize( + "val,precision,expected", + [ + (-117.9998, 3, -118), + (117.9998, 3, 118), + (117.9998, 2, 118), + (0.000123456, 2, 0.00012), + ], +) +def test_round_frac(val, precision, expected): + # see gh-1979 + result = tmod._round_frac(val, precision=precision) + assert result == expected + + +def test_cut_return_intervals(): + ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8]) + result = cut(ser, 3) + + exp_bins = np.linspace(0, 8, num=4).round(3) + exp_bins[0] -= 0.008 + + expected = Series( + IntervalIndex.from_breaks(exp_bins, closed="right").take( + [0, 0, 0, 1, 1, 1, 2, 2, 2] + ) + ).astype(CategoricalDtype(ordered=True)) + tm.assert_series_equal(result, expected) + + +def test_series_ret_bins(): + # see gh-8589 + ser = Series(np.arange(4)) + result, bins = cut(ser, 2, retbins=True) + + expected = Series( + IntervalIndex.from_breaks([-0.003, 1.5, 3], closed="right").repeat(2) + ).astype(CategoricalDtype(ordered=True)) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "kwargs,msg", + [ + ({"duplicates": "drop"}, None), + ({}, "Bin edges must be unique"), + ({"duplicates": "raise"}, "Bin edges must be unique"), + ({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"), + ], +) +def test_cut_duplicates_bin(kwargs, msg): + # see gh-20947 + bins = [0, 2, 4, 6, 10, 10] + values = Series(np.array([1, 3, 5, 7, 9]), index=["a", "b", "c", "d", "e"]) + + if msg is not None: + with pytest.raises(ValueError, match=msg): + cut(values, bins, **kwargs) + else: + result = cut(values, bins, **kwargs) + expected = cut(values, pd.unique(np.asarray(bins))) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("data", [9.0, -9.0, 0.0]) +@pytest.mark.parametrize("length", [1, 2]) +def test_single_bin(data, length): + # see gh-14652, gh-15428 + ser = Series([data] * length) + result = cut(ser, 1, labels=False) + + expected = Series([0] * length, dtype=np.intp) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "array_1_writeable,array_2_writeable", [(True, True), (True, False), (False, False)] +) +def test_cut_read_only(array_1_writeable, array_2_writeable): + # issue 18773 + array_1 = np.arange(0, 100, 10) + array_1.flags.writeable = array_1_writeable + + array_2 = np.arange(0, 100, 10) + array_2.flags.writeable = array_2_writeable + + hundred_elements = np.arange(100) + tm.assert_categorical_equal( + cut(hundred_elements, array_1), cut(hundred_elements, array_2) + ) + + +@pytest.mark.parametrize( + "conv", + [ + lambda v: Timestamp(v), + lambda v: to_datetime(v), + lambda v: np.datetime64(v), + lambda v: Timestamp(v).to_pydatetime(), + ], +) +def test_datetime_bin(conv): + data = [np.datetime64("2012-12-13"), np.datetime64("2012-12-15")] + bin_data = ["2012-12-12", "2012-12-14", "2012-12-16"] + + expected = Series( + IntervalIndex( + [ + Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])), + Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2])), + ] + ) + ).astype(CategoricalDtype(ordered=True)) + + bins = [conv(v) for v in bin_data] + result = Series(cut(data, bins=bins)) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("box", [Series, Index, np.array, list]) +def test_datetime_cut(unit, box): + # see gh-14714 + # + # Testing time data when it comes in various collection types. 
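+ # (Series, Index, ndarray and plain-list inputs should all be binned consistently) 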
+ data = to_datetime(["2013-01-01", "2013-01-02", "2013-01-03"]).astype(f"M8[{unit}]") + data = box(data) + result, _ = cut(data, 3, retbins=True) + + if box is list: + # We don't (yet) do inference on these, so get nanos + unit = "ns" + + if unit == "s": + # See https://github.com/pandas-dev/pandas/pull/56101#discussion_r1405325425 + # for why we round to 8 seconds instead of 7 + left = DatetimeIndex( + ["2012-12-31 23:57:08", "2013-01-01 16:00:00", "2013-01-02 08:00:00"], + dtype=f"M8[{unit}]", + ) + else: + left = DatetimeIndex( + [ + "2012-12-31 23:57:07.200000", + "2013-01-01 16:00:00", + "2013-01-02 08:00:00", + ], + dtype=f"M8[{unit}]", + ) + right = DatetimeIndex( + ["2013-01-01 16:00:00", "2013-01-02 08:00:00", "2013-01-03 00:00:00"], + dtype=f"M8[{unit}]", + ) + + exp_intervals = IntervalIndex.from_arrays(left, right) + expected = Series(exp_intervals).astype(CategoricalDtype(ordered=True)) + tm.assert_series_equal(Series(result), expected) + + +@pytest.mark.parametrize("box", [list, np.array, Index, Series]) +def test_datetime_tz_cut_mismatched_tzawareness(box): + # GH#54964 + bins = box( + [ + Timestamp("2013-01-01 04:57:07.200000"), + Timestamp("2013-01-01 21:00:00"), + Timestamp("2013-01-02 13:00:00"), + Timestamp("2013-01-03 05:00:00"), + ] + ) + ser = Series(date_range("20130101", periods=3, tz="US/Eastern")) + + msg = "Cannot use timezone-naive bins with timezone-aware values" + with pytest.raises(ValueError, match=msg): + cut(ser, bins) + + +@pytest.mark.parametrize( + "bins", + [ + 3, + [ + Timestamp("2013-01-01 04:57:07.200000", tz="UTC").tz_convert("US/Eastern"), + Timestamp("2013-01-01 21:00:00", tz="UTC").tz_convert("US/Eastern"), + Timestamp("2013-01-02 13:00:00", tz="UTC").tz_convert("US/Eastern"), + Timestamp("2013-01-03 05:00:00", tz="UTC").tz_convert("US/Eastern"), + ], + ], +) +@pytest.mark.parametrize("box", [list, np.array, Index, Series]) +def test_datetime_tz_cut(bins, box): + # see gh-19872 + tz = "US/Eastern" + ser = Series(date_range("20130101", periods=3, tz=tz)) + + if not isinstance(bins, int): + bins = box(bins) + + result = cut(ser, bins) + expected = Series( + IntervalIndex( + [ + Interval( + Timestamp("2012-12-31 23:57:07.200000", tz=tz), + Timestamp("2013-01-01 16:00:00", tz=tz), + ), + Interval( + Timestamp("2013-01-01 16:00:00", tz=tz), + Timestamp("2013-01-02 08:00:00", tz=tz), + ), + Interval( + Timestamp("2013-01-02 08:00:00", tz=tz), + Timestamp("2013-01-03 00:00:00", tz=tz), + ), + ] + ) + ).astype(CategoricalDtype(ordered=True)) + tm.assert_series_equal(result, expected) + + +def test_datetime_nan_error(): + msg = "bins must be of datetime64 dtype" + + with pytest.raises(ValueError, match=msg): + cut(date_range("20130101", periods=3), bins=[0, 2, 4]) + + +def test_datetime_nan_mask(): + result = cut( + date_range("20130102", periods=5), bins=date_range("20130101", periods=2) + ) + + mask = result.categories.isna() + tm.assert_numpy_array_equal(mask, np.array([False])) + + mask = result.isna() + tm.assert_numpy_array_equal(mask, np.array([False, True, True, True, True])) + + +@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"]) +def test_datetime_cut_roundtrip(tz, unit): + # see gh-19891 + ser = Series(date_range("20180101", periods=3, tz=tz, unit=unit)) + result, result_bins = cut(ser, 2, retbins=True) + + expected = cut(ser, result_bins) + tm.assert_series_equal(result, expected) + + if unit == "s": + # TODO: constructing DatetimeIndex with dtype="M8[s]" without truncating + # the first entry here raises in array_to_datetime. 
Should truncate + # instead of raising? + # See https://github.com/pandas-dev/pandas/pull/56101#discussion_r1405325425 + # for why we round to 8 seconds instead of 7 + expected_bins = DatetimeIndex( + ["2017-12-31 23:57:08", "2018-01-02 00:00:00", "2018-01-03 00:00:00"], + dtype=f"M8[{unit}]", + ) + else: + expected_bins = DatetimeIndex( + [ + "2017-12-31 23:57:07.200000", + "2018-01-02 00:00:00", + "2018-01-03 00:00:00", + ], + dtype=f"M8[{unit}]", + ) + expected_bins = expected_bins.tz_localize(tz) + tm.assert_index_equal(result_bins, expected_bins) + + +def test_timedelta_cut_roundtrip(): + # see gh-19891 + ser = Series(timedelta_range("1day", periods=3)) + result, result_bins = cut(ser, 2, retbins=True) + + expected = cut(ser, result_bins) + tm.assert_series_equal(result, expected) + + expected_bins = TimedeltaIndex( + ["0 days 23:57:07.200000", "2 days 00:00:00", "3 days 00:00:00"] + ) + tm.assert_index_equal(result_bins, expected_bins) + + +@pytest.mark.parametrize("bins", [6, 7]) +@pytest.mark.parametrize( + "box, compare", + [ + (Series, tm.assert_series_equal), + (np.array, tm.assert_categorical_equal), + (list, tm.assert_equal), + ], +) +def test_cut_bool_coercion_to_int(bins, box, compare): + # issue 20303 + data_expected = box([0, 1, 1, 0, 1] * 10) + data_result = box([False, True, True, False, True] * 10) + expected = cut(data_expected, bins, duplicates="drop") + result = cut(data_result, bins, duplicates="drop") + compare(result, expected) + + +@pytest.mark.parametrize("labels", ["foo", 1, True]) +def test_cut_incorrect_labels(labels): + # GH 13318 + values = range(5) + msg = "Bin labels must either be False, None or passed in as a list-like argument" + with pytest.raises(ValueError, match=msg): + cut(values, 4, labels=labels) + + +@pytest.mark.parametrize("bins", [3, [0, 5, 15]]) +@pytest.mark.parametrize("right", [True, False]) +@pytest.mark.parametrize("include_lowest", [True, False]) +def test_cut_nullable_integer(bins, right, include_lowest): + a = np.random.default_rng(2).integers(0, 10, size=50).astype(float) + a[::2] = np.nan + result = cut( + pd.array(a, dtype="Int64"), bins, right=right, include_lowest=include_lowest + ) + expected = cut(a, bins, right=right, include_lowest=include_lowest) + tm.assert_categorical_equal(result, expected) + + +@pytest.mark.parametrize( + "data, bins, labels, expected_codes, expected_labels", + [ + ([15, 17, 19], [14, 16, 18, 20], ["A", "B", "A"], [0, 1, 0], ["A", "B"]), + ([1, 3, 5], [0, 2, 4, 6, 8], [2, 0, 1, 2], [2, 0, 1], [0, 1, 2]), + ], +) +def test_cut_non_unique_labels(data, bins, labels, expected_codes, expected_labels): + # GH 33141 + result = cut(data, bins=bins, labels=labels, ordered=False) + expected = Categorical.from_codes( + expected_codes, categories=expected_labels, ordered=False + ) + tm.assert_categorical_equal(result, expected) + + +@pytest.mark.parametrize( + "data, bins, labels, expected_codes, expected_labels", + [ + ([15, 17, 19], [14, 16, 18, 20], ["C", "B", "A"], [0, 1, 2], ["C", "B", "A"]), + ([1, 3, 5], [0, 2, 4, 6, 8], [3, 0, 1, 2], [0, 1, 2], [3, 0, 1, 2]), + ], +) +def test_cut_unordered_labels(data, bins, labels, expected_codes, expected_labels): + # GH 33141 + result = cut(data, bins=bins, labels=labels, ordered=False) + expected = Categorical.from_codes( + expected_codes, categories=expected_labels, ordered=False + ) + tm.assert_categorical_equal(result, expected) + + +def test_cut_unordered_with_missing_labels_raises_error(): + # GH 33141 + msg = "'labels' must be provided if 'ordered = False'" + 
with pytest.raises(ValueError, match=msg): + cut([0.5, 3], bins=[0, 1, 2], ordered=False) + + +def test_cut_unordered_with_series_labels(): + # https://github.com/pandas-dev/pandas/issues/36603 + ser = Series([1, 2, 3, 4, 5]) + bins = Series([0, 2, 4, 6]) + labels = Series(["a", "b", "c"]) + result = cut(ser, bins=bins, labels=labels, ordered=False) + expected = Series(["a", "a", "b", "b", "c"], dtype="category") + tm.assert_series_equal(result, expected) + + +def test_cut_no_warnings(): + df = DataFrame({"value": np.random.default_rng(2).integers(0, 100, 20)}) + labels = [f"{i} - {i + 9}" for i in range(0, 100, 10)] + with tm.assert_produces_warning(False): + df["group"] = cut(df.value, range(0, 105, 10), right=False, labels=labels) + + +def test_cut_with_duplicated_index_lowest_included(): + # GH 42185 + expected = Series( + [Interval(-0.001, 2, closed="right")] * 3 + + [Interval(2, 4, closed="right"), Interval(-0.001, 2, closed="right")], + index=[0, 1, 2, 3, 0], + dtype="category", + ).cat.as_ordered() + + ser = Series([0, 1, 2, 3, 0], index=[0, 1, 2, 3, 0]) + result = cut(ser, bins=[0, 2, 4], include_lowest=True) + tm.assert_series_equal(result, expected) + + +def test_cut_with_nonexact_categorical_indices(): + # GH 42424 + + ser = Series(range(100)) + ser1 = cut(ser, 10).value_counts().head(5) + ser2 = cut(ser, 10).value_counts().tail(5) + result = DataFrame({"1": ser1, "2": ser2}) + + index = pd.CategoricalIndex( + [ + Interval(-0.099, 9.9, closed="right"), + Interval(9.9, 19.8, closed="right"), + Interval(19.8, 29.7, closed="right"), + Interval(29.7, 39.6, closed="right"), + Interval(39.6, 49.5, closed="right"), + Interval(49.5, 59.4, closed="right"), + Interval(59.4, 69.3, closed="right"), + Interval(69.3, 79.2, closed="right"), + Interval(79.2, 89.1, closed="right"), + Interval(89.1, 99, closed="right"), + ], + ordered=True, + ) + + expected = DataFrame( + {"1": [10] * 5 + [np.nan] * 5, "2": [np.nan] * 5 + [10] * 5}, index=index + ) + + tm.assert_frame_equal(expected, result) + + +def test_cut_with_timestamp_tuple_labels(): + # GH 40661 + labels = [(Timestamp(10),), (Timestamp(20),), (Timestamp(30),)] + result = cut([2, 4, 6], bins=[1, 3, 5, 7], labels=labels) + + expected = Categorical.from_codes([0, 1, 2], labels, ordered=True) + tm.assert_categorical_equal(result, expected) + + +def test_cut_bins_datetime_intervalindex(): + # https://github.com/pandas-dev/pandas/issues/46218 + bins = interval_range(Timestamp("2022-02-25"), Timestamp("2022-02-27"), freq="1D") + # passing Series instead of list is important to trigger bug + result = cut(Series([Timestamp("2022-02-26")]).astype("M8[ns]"), bins=bins) + expected = Categorical.from_codes([0], bins, ordered=True) + tm.assert_categorical_equal(result.array, expected) + + +def test_cut_with_nullable_int64(): + # GH 30787 + series = Series([0, 1, 2, 3, 4, pd.NA, 6, 7], dtype="Int64") + bins = [0, 2, 4, 6, 8] + intervals = IntervalIndex.from_breaks(bins) + + expected = Series( + Categorical.from_codes([-1, 0, 0, 1, 1, -1, 2, 3], intervals, ordered=True) + ) + + result = cut(series, bins=bins) + + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_get_dummies.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_get_dummies.py new file mode 100644 index 0000000000000000000000000000000000000000..31260e4dcb7d2ecf5f003ce4aadf890493b59887 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_get_dummies.py @@ -0,0 +1,743 @@ +import re +import 
unicodedata + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer_dtype + +import pandas as pd +from pandas import ( + ArrowDtype, + Categorical, + CategoricalDtype, + CategoricalIndex, + DataFrame, + Index, + RangeIndex, + Series, + SparseDtype, + get_dummies, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + +try: + import pyarrow as pa +except ImportError: + pa = None + + +class TestGetDummies: + @pytest.fixture + def df(self): + return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]}) + + @pytest.fixture(params=["uint8", "i8", np.float64, bool, None]) + def dtype(self, request): + return np.dtype(request.param) + + @pytest.fixture(params=["dense", "sparse"]) + def sparse(self, request): + # params are strings to simplify reading test results, + # e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True] + return request.param == "sparse" + + def effective_dtype(self, dtype): + if dtype is None: + return np.uint8 + return dtype + + def test_get_dummies_raises_on_dtype_object(self, df): + msg = "dtype=object is not a valid dtype for get_dummies" + with pytest.raises(ValueError, match=msg): + get_dummies(df, dtype="object") + + def test_get_dummies_basic(self, sparse, dtype): + s_list = list("abc") + s_series = Series(s_list) + s_series_index = Series(s_list, list("ABC")) + + expected = DataFrame( + {"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]}, + dtype=self.effective_dtype(dtype), + ) + if sparse: + if dtype.kind == "b": + expected = expected.apply(SparseArray, fill_value=False) + else: + expected = expected.apply(SparseArray, fill_value=0.0) + result = get_dummies(s_list, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_series, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + expected.index = list("ABC") + result = get_dummies(s_series_index, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_basic_types(self, sparse, dtype, using_infer_string): + # GH 10531 + s_list = list("abc") + s_series = Series(s_list) + s_df = DataFrame( + {"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]} + ) + + expected = DataFrame( + {"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]}, + dtype=self.effective_dtype(dtype), + columns=list("abc"), + ) + if sparse: + if is_integer_dtype(dtype): + fill_value = 0 + elif dtype == bool: + fill_value = False + else: + fill_value = 0.0 + + expected = expected.apply(SparseArray, fill_value=fill_value) + result = get_dummies(s_list, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_series, sparse=sparse, dtype=dtype) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype) + if sparse: + dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]" + else: + dtype_name = self.effective_dtype(dtype).name + + expected = Series({dtype_name: 8}, name="count") + result = result.dtypes.value_counts() + result.index = [str(i) for i in result.index] + tm.assert_series_equal(result, expected) + + result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype) + + key = "string" if using_infer_string else "object" + expected_counts = {"int64": 1, key: 1} + expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0) + + expected = Series(expected_counts, 
name="count").sort_index() + result = result.dtypes.value_counts() + result.index = [str(i) for i in result.index] + result = result.sort_index() + tm.assert_series_equal(result, expected) + + def test_get_dummies_just_na(self, sparse): + just_na_list = [np.nan] + just_na_series = Series(just_na_list) + just_na_series_index = Series(just_na_list, index=["A"]) + + res_list = get_dummies(just_na_list, sparse=sparse) + res_series = get_dummies(just_na_series, sparse=sparse) + res_series_index = get_dummies(just_na_series_index, sparse=sparse) + + assert res_list.empty + assert res_series.empty + assert res_series_index.empty + + assert res_list.index.tolist() == [0] + assert res_series.index.tolist() == [0] + assert res_series_index.index.tolist() == ["A"] + + def test_get_dummies_include_na(self, sparse, dtype): + s = ["a", "b", np.nan] + res = get_dummies(s, sparse=sparse, dtype=dtype) + exp = DataFrame( + {"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype) + ) + if sparse: + if dtype.kind == "b": + exp = exp.apply(SparseArray, fill_value=False) + else: + exp = exp.apply(SparseArray, fill_value=0.0) + tm.assert_frame_equal(res, exp) + + # Sparse dataframes do not allow nan labelled columns, see #GH8822 + res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype) + exp_na = DataFrame( + {np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]}, + dtype=self.effective_dtype(dtype), + ) + exp_na = exp_na.reindex(["a", "b", np.nan], axis=1) + # hack (NaN handling in assert_index_equal) + exp_na.columns = res_na.columns + if sparse: + if dtype.kind == "b": + exp_na = exp_na.apply(SparseArray, fill_value=False) + else: + exp_na = exp_na.apply(SparseArray, fill_value=0.0) + tm.assert_frame_equal(res_na, exp_na) + + res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype) + exp_just_na = DataFrame( + Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype) + ) + tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values) + + def test_get_dummies_unicode(self, sparse): + # See GH 6885 - get_dummies chokes on unicode values + e = "e" + eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE") + s = [e, eacute, eacute] + res = get_dummies(s, prefix="letter", sparse=sparse) + exp = DataFrame( + {"letter_e": [True, False, False], f"letter_{eacute}": [False, True, True]} + ) + if sparse: + exp = exp.apply(SparseArray, fill_value=False) + tm.assert_frame_equal(res, exp) + + def test_dataframe_dummies_all_obj(self, df, sparse): + df = df[["A", "B"]] + result = get_dummies(df, sparse=sparse) + expected = DataFrame( + {"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]}, + dtype=bool, + ) + if sparse: + expected = DataFrame( + { + "A_a": SparseArray([1, 0, 1], dtype="bool"), + "A_b": SparseArray([0, 1, 0], dtype="bool"), + "B_b": SparseArray([1, 1, 0], dtype="bool"), + "B_c": SparseArray([0, 0, 1], dtype="bool"), + } + ) + + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_string_dtype(self, df, using_infer_string): + # GH44965 + df = df[["A", "B"]] + df = df.astype({"A": "object", "B": "string"}) + result = get_dummies(df) + expected = DataFrame( + { + "A_a": [1, 0, 1], + "A_b": [0, 1, 0], + "B_b": [1, 1, 0], + "B_c": [0, 0, 1], + }, + dtype=bool, + ) + if not using_infer_string: + # infer_string returns numpy bools + expected[["B_b", "B_c"]] = expected[["B_b", "B_c"]].astype("boolean") + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_mix_default(self, df, sparse, dtype): + 
result = get_dummies(df, sparse=sparse, dtype=dtype) + if sparse: + arr = SparseArray + if dtype.kind == "b": + typ = SparseDtype(dtype, False) + else: + typ = SparseDtype(dtype, 0) + else: + arr = np.array + typ = dtype + expected = DataFrame( + { + "C": [1, 2, 3], + "A_a": arr([1, 0, 1], dtype=typ), + "A_b": arr([0, 1, 0], dtype=typ), + "B_b": arr([1, 1, 0], dtype=typ), + "B_c": arr([0, 0, 1], dtype=typ), + } + ) + expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]] + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_prefix_list(self, df, sparse): + prefixes = ["from_A", "from_B"] + result = get_dummies(df, prefix=prefixes, sparse=sparse) + expected = DataFrame( + { + "C": [1, 2, 3], + "from_A_a": [True, False, True], + "from_A_b": [False, True, False], + "from_B_b": [True, True, False], + "from_B_c": [False, False, True], + }, + ) + expected[["C"]] = df[["C"]] + cols = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"] + expected = expected[["C"] + cols] + + typ = SparseArray if sparse else Series + expected[cols] = expected[cols].apply(lambda x: typ(x)) + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_prefix_str(self, df, sparse): + # not that you should do this... + result = get_dummies(df, prefix="bad", sparse=sparse) + bad_columns = ["bad_a", "bad_b", "bad_b", "bad_c"] + expected = DataFrame( + [ + [1, True, False, True, False], + [2, False, True, True, False], + [3, True, False, False, True], + ], + columns=["C"] + bad_columns, + ) + expected = expected.astype({"C": np.int64}) + if sparse: + # work around astyping & assigning with duplicate columns + # https://github.com/pandas-dev/pandas/issues/14427 + expected = pd.concat( + [ + Series([1, 2, 3], name="C"), + Series([True, False, True], name="bad_a", dtype="Sparse[bool]"), + Series([False, True, False], name="bad_b", dtype="Sparse[bool]"), + Series([True, True, False], name="bad_b", dtype="Sparse[bool]"), + Series([False, False, True], name="bad_c", dtype="Sparse[bool]"), + ], + axis=1, + ) + + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_subset(self, df, sparse): + result = get_dummies(df, prefix=["from_A"], columns=["A"], sparse=sparse) + expected = DataFrame( + { + "B": ["b", "b", "c"], + "C": [1, 2, 3], + "from_A_a": [1, 0, 1], + "from_A_b": [0, 1, 0], + }, + ) + cols = expected.columns + expected[cols[1:]] = expected[cols[1:]].astype(bool) + expected[["C"]] = df[["C"]] + if sparse: + cols = ["from_A_a", "from_A_b"] + expected[cols] = expected[cols].astype(SparseDtype("bool", False)) + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_prefix_sep(self, df, sparse): + result = get_dummies(df, prefix_sep="..", sparse=sparse) + expected = DataFrame( + { + "C": [1, 2, 3], + "A..a": [True, False, True], + "A..b": [False, True, False], + "B..b": [True, True, False], + "B..c": [False, False, True], + }, + ) + expected[["C"]] = df[["C"]] + expected = expected[["C", "A..a", "A..b", "B..b", "B..c"]] + if sparse: + cols = ["A..a", "A..b", "B..b", "B..c"] + expected[cols] = expected[cols].astype(SparseDtype("bool", False)) + + tm.assert_frame_equal(result, expected) + + result = get_dummies(df, prefix_sep=["..", "__"], sparse=sparse) + expected = expected.rename(columns={"B..b": "B__b", "B..c": "B__c"}) + tm.assert_frame_equal(result, expected) + + result = get_dummies(df, prefix_sep={"A": "..", "B": "__"}, sparse=sparse) + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_prefix_bad_length(self, df, sparse): + msg = 
re.escape( + "Length of 'prefix' (1) did not match the length of the columns being " + "encoded (2)" + ) + with pytest.raises(ValueError, match=msg): + get_dummies(df, prefix=["too few"], sparse=sparse) + + def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse): + msg = re.escape( + "Length of 'prefix_sep' (1) did not match the length of the columns being " + "encoded (2)" + ) + with pytest.raises(ValueError, match=msg): + get_dummies(df, prefix_sep=["bad"], sparse=sparse) + + def test_dataframe_dummies_prefix_dict(self, sparse): + prefixes = {"A": "from_A", "B": "from_B"} + df = DataFrame({"C": [1, 2, 3], "A": ["a", "b", "a"], "B": ["b", "b", "c"]}) + result = get_dummies(df, prefix=prefixes, sparse=sparse) + + expected = DataFrame( + { + "C": [1, 2, 3], + "from_A_a": [1, 0, 1], + "from_A_b": [0, 1, 0], + "from_B_b": [1, 1, 0], + "from_B_c": [0, 0, 1], + } + ) + + columns = ["from_A_a", "from_A_b", "from_B_b", "from_B_c"] + expected[columns] = expected[columns].astype(bool) + if sparse: + expected[columns] = expected[columns].astype(SparseDtype("bool", False)) + + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_with_na(self, df, sparse, dtype): + df.loc[3, :] = [np.nan, np.nan, np.nan] + result = get_dummies(df, dummy_na=True, sparse=sparse, dtype=dtype).sort_index( + axis=1 + ) + + if sparse: + arr = SparseArray + if dtype.kind == "b": + typ = SparseDtype(dtype, False) + else: + typ = SparseDtype(dtype, 0) + else: + arr = np.array + typ = dtype + + expected = DataFrame( + { + "C": [1, 2, 3, np.nan], + "A_a": arr([1, 0, 1, 0], dtype=typ), + "A_b": arr([0, 1, 0, 0], dtype=typ), + "A_nan": arr([0, 0, 0, 1], dtype=typ), + "B_b": arr([1, 1, 0, 0], dtype=typ), + "B_c": arr([0, 0, 1, 0], dtype=typ), + "B_nan": arr([0, 0, 0, 1], dtype=typ), + } + ).sort_index(axis=1) + + tm.assert_frame_equal(result, expected) + + result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype) + expected = expected[["C", "A_a", "A_b", "B_b", "B_c"]] + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_with_categorical(self, df, sparse, dtype): + df["cat"] = Categorical(["x", "y", "y"]) + result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1) + if sparse: + arr = SparseArray + if dtype.kind == "b": + typ = SparseDtype(dtype, False) + else: + typ = SparseDtype(dtype, 0) + else: + arr = np.array + typ = dtype + + expected = DataFrame( + { + "C": [1, 2, 3], + "A_a": arr([1, 0, 1], dtype=typ), + "A_b": arr([0, 1, 0], dtype=typ), + "B_b": arr([1, 1, 0], dtype=typ), + "B_c": arr([0, 0, 1], dtype=typ), + "cat_x": arr([1, 0, 0], dtype=typ), + "cat_y": arr([0, 1, 1], dtype=typ), + } + ).sort_index(axis=1) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "get_dummies_kwargs,expected", + [ + ( + {"data": DataFrame({"ä": ["a"]})}, + DataFrame({"ä_a": [True]}), + ), + ( + {"data": DataFrame({"x": ["ä"]})}, + DataFrame({"x_ä": [True]}), + ), + ( + {"data": DataFrame({"x": ["a"]}), "prefix": "ä"}, + DataFrame({"ä_a": [True]}), + ), + ( + {"data": DataFrame({"x": ["a"]}), "prefix_sep": "ä"}, + DataFrame({"xäa": [True]}), + ), + ], + ) + def test_dataframe_dummies_unicode(self, get_dummies_kwargs, expected): + # GH22084 get_dummies incorrectly encodes unicode characters + # in dataframe column names + result = get_dummies(**get_dummies_kwargs) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_basic_drop_first(self, sparse): + # GH12402 Add a new parameter `drop_first` to avoid collinearity + # Basic case + 
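# drop_first=True drops the first level ("a"), so only "b" and "c" remain +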
s_list = list("abc") + s_series = Series(s_list) + s_series_index = Series(s_list, list("ABC")) + + expected = DataFrame({"b": [0, 1, 0], "c": [0, 0, 1]}, dtype=bool) + + result = get_dummies(s_list, drop_first=True, sparse=sparse) + if sparse: + expected = expected.apply(SparseArray, fill_value=False) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_series, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + expected.index = list("ABC") + result = get_dummies(s_series_index, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_basic_drop_first_one_level(self, sparse): + # Test the case that categorical variable only has one level. + s_list = list("aaa") + s_series = Series(s_list) + s_series_index = Series(s_list, list("ABC")) + + expected = DataFrame(index=RangeIndex(3)) + + result = get_dummies(s_list, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + result = get_dummies(s_series, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + expected = DataFrame(index=list("ABC")) + result = get_dummies(s_series_index, drop_first=True, sparse=sparse) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_basic_drop_first_NA(self, sparse): + # Test NA handling together with drop_first + s_NA = ["a", "b", np.nan] + res = get_dummies(s_NA, drop_first=True, sparse=sparse) + exp = DataFrame({"b": [0, 1, 0]}, dtype=bool) + if sparse: + exp = exp.apply(SparseArray, fill_value=False) + + tm.assert_frame_equal(res, exp) + + res_na = get_dummies(s_NA, dummy_na=True, drop_first=True, sparse=sparse) + exp_na = DataFrame({"b": [0, 1, 0], np.nan: [0, 0, 1]}, dtype=bool).reindex( + ["b", np.nan], axis=1 + ) + if sparse: + exp_na = exp_na.apply(SparseArray, fill_value=False) + tm.assert_frame_equal(res_na, exp_na) + + res_just_na = get_dummies( + [np.nan], dummy_na=True, drop_first=True, sparse=sparse + ) + exp_just_na = DataFrame(index=RangeIndex(1)) + tm.assert_frame_equal(res_just_na, exp_just_na) + + def test_dataframe_dummies_drop_first(self, df, sparse): + df = df[["A", "B"]] + result = get_dummies(df, drop_first=True, sparse=sparse) + expected = DataFrame({"A_b": [0, 1, 0], "B_c": [0, 0, 1]}, dtype=bool) + if sparse: + expected = expected.apply(SparseArray, fill_value=False) + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_drop_first_with_categorical(self, df, sparse, dtype): + df["cat"] = Categorical(["x", "y", "y"]) + result = get_dummies(df, drop_first=True, sparse=sparse) + expected = DataFrame( + {"C": [1, 2, 3], "A_b": [0, 1, 0], "B_c": [0, 0, 1], "cat_y": [0, 1, 1]} + ) + cols = ["A_b", "B_c", "cat_y"] + expected[cols] = expected[cols].astype(bool) + expected = expected[["C", "A_b", "B_c", "cat_y"]] + if sparse: + for col in cols: + expected[col] = SparseArray(expected[col]) + tm.assert_frame_equal(result, expected) + + def test_dataframe_dummies_drop_first_with_na(self, df, sparse): + df.loc[3, :] = [np.nan, np.nan, np.nan] + result = get_dummies( + df, dummy_na=True, drop_first=True, sparse=sparse + ).sort_index(axis=1) + expected = DataFrame( + { + "C": [1, 2, 3, np.nan], + "A_b": [0, 1, 0, 0], + "A_nan": [0, 0, 0, 1], + "B_c": [0, 0, 1, 0], + "B_nan": [0, 0, 0, 1], + } + ) + cols = ["A_b", "A_nan", "B_c", "B_nan"] + expected[cols] = expected[cols].astype(bool) + expected = expected.sort_index(axis=1) + if sparse: + for col in cols: + expected[col] = SparseArray(expected[col]) + + tm.assert_frame_equal(result, 
expected) + + result = get_dummies(df, dummy_na=False, drop_first=True, sparse=sparse) + expected = expected[["C", "A_b", "B_c"]] + tm.assert_frame_equal(result, expected) + + def test_get_dummies_int_int(self): + data = Series([1, 2, 1]) + result = get_dummies(data) + expected = DataFrame([[1, 0], [0, 1], [1, 0]], columns=[1, 2], dtype=bool) + tm.assert_frame_equal(result, expected) + + data = Series(Categorical(["a", "b", "a"])) + result = get_dummies(data) + expected = DataFrame( + [[1, 0], [0, 1], [1, 0]], columns=Categorical(["a", "b"]), dtype=bool + ) + tm.assert_frame_equal(result, expected) + + def test_get_dummies_int_df(self, dtype): + data = DataFrame( + { + "A": [1, 2, 1], + "B": Categorical(["a", "b", "a"]), + "C": [1, 2, 1], + "D": [1.0, 2.0, 1.0], + } + ) + columns = ["C", "D", "A_1", "A_2", "B_a", "B_b"] + expected = DataFrame( + [[1, 1.0, 1, 0, 1, 0], [2, 2.0, 0, 1, 0, 1], [1, 1.0, 1, 0, 1, 0]], + columns=columns, + ) + expected[columns[2:]] = expected[columns[2:]].astype(dtype) + result = get_dummies(data, columns=["A", "B"], dtype=dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_dataframe_dummies_preserve_categorical_dtype(self, dtype, ordered): + # GH13854 + cat = Categorical(list("xy"), categories=list("xyz"), ordered=ordered) + result = get_dummies(cat, dtype=dtype) + + data = np.array([[1, 0, 0], [0, 1, 0]], dtype=self.effective_dtype(dtype)) + cols = CategoricalIndex( + cat.categories, categories=cat.categories, ordered=ordered + ) + expected = DataFrame(data, columns=cols, dtype=self.effective_dtype(dtype)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("sparse", [True, False]) + def test_get_dummies_dont_sparsify_all_columns(self, sparse): + # GH18914 + df = DataFrame.from_dict({"GDP": [1, 2], "Nation": ["AB", "CD"]}) + df = get_dummies(df, columns=["Nation"], sparse=sparse) + df2 = df.reindex(columns=["GDP"]) + + tm.assert_frame_equal(df[["GDP"]], df2) + + def test_get_dummies_duplicate_columns(self, df): + # GH20839 + df.columns = ["A", "A", "A"] + result = get_dummies(df).sort_index(axis=1) + + expected = DataFrame( + [ + [1, True, False, True, False], + [2, False, True, True, False], + [3, True, False, False, True], + ], + columns=["A", "A_a", "A_b", "A_b", "A_c"], + ).sort_index(axis=1) + + expected = expected.astype({"A": np.int64}) + + tm.assert_frame_equal(result, expected) + + def test_get_dummies_all_sparse(self): + df = DataFrame({"A": [1, 2]}) + result = get_dummies(df, columns=["A"], sparse=True) + dtype = SparseDtype("bool", False) + expected = DataFrame( + { + "A_1": SparseArray([1, 0], dtype=dtype), + "A_2": SparseArray([0, 1], dtype=dtype), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("values", ["baz"]) + def test_get_dummies_with_string_values(self, values): + # issue #28383 + df = DataFrame( + { + "bar": [1, 2, 3, 4, 5, 6], + "foo": ["one", "one", "one", "two", "two", "two"], + "baz": ["A", "B", "C", "A", "B", "C"], + "zoo": ["x", "y", "z", "q", "w", "t"], + } + ) + + msg = "Input must be a list-like for parameter `columns`" + + with pytest.raises(TypeError, match=msg): + get_dummies(df, columns=values) + + def test_get_dummies_ea_dtype_series(self, any_numeric_ea_and_arrow_dtype): + # GH#32430 + ser = Series(list("abca")) + result = get_dummies(ser, dtype=any_numeric_ea_and_arrow_dtype) + expected = DataFrame( + {"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], "c": [0, 0, 1, 0]}, + dtype=any_numeric_ea_and_arrow_dtype, + ) + 
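# the dummy columns should carry the requested masked/Arrow dtype end-to-end +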
tm.assert_frame_equal(result, expected) + + def test_get_dummies_ea_dtype_dataframe(self, any_numeric_ea_and_arrow_dtype): + # GH#32430 + df = DataFrame({"x": list("abca")}) + result = get_dummies(df, dtype=any_numeric_ea_and_arrow_dtype) + expected = DataFrame( + {"x_a": [1, 0, 0, 1], "x_b": [0, 1, 0, 0], "x_c": [0, 0, 1, 0]}, + dtype=any_numeric_ea_and_arrow_dtype, + ) + tm.assert_frame_equal(result, expected) + + @td.skip_if_no("pyarrow") + def test_get_dummies_ea_dtype(self): + # GH#56273 + for dtype, exp_dtype in [ + ("string[pyarrow]", "boolean"), + ("string[pyarrow_numpy]", "bool"), + (CategoricalDtype(Index(["a"], dtype="string[pyarrow]")), "boolean"), + (CategoricalDtype(Index(["a"], dtype="string[pyarrow_numpy]")), "bool"), + ]: + df = DataFrame({"name": Series(["a"], dtype=dtype), "x": 1}) + result = get_dummies(df) + expected = DataFrame({"x": 1, "name_a": Series([True], dtype=exp_dtype)}) + tm.assert_frame_equal(result, expected) + + @td.skip_if_no("pyarrow") + def test_get_dummies_arrow_dtype(self): + # GH#56273 + df = DataFrame({"name": Series(["a"], dtype=ArrowDtype(pa.string())), "x": 1}) + result = get_dummies(df) + expected = DataFrame({"x": 1, "name_a": Series([True], dtype="bool[pyarrow]")}) + tm.assert_frame_equal(result, expected) + + df = DataFrame( + { + "name": Series( + ["a"], + dtype=CategoricalDtype(Index(["a"], dtype=ArrowDtype(pa.string()))), + ), + "x": 1, + } + ) + result = get_dummies(df) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_melt.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_melt.py new file mode 100644 index 0000000000000000000000000000000000000000..272c5b34032937215b3ffff45ea10a9e31048041 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_melt.py @@ -0,0 +1,1252 @@ +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + date_range, + lreshape, + melt, + wide_to_long, +) +import pandas._testing as tm + + +@pytest.fixture +def df(): + res = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + res["id1"] = (res["A"] > 0).astype(np.int64) + res["id2"] = (res["B"] > 0).astype(np.int64) + return res + + +@pytest.fixture +def df1(): + res = DataFrame( + [ + [1.067683, -1.110463, 0.20867], + [-1.321405, 0.368915, -1.055342], + [-0.807333, 0.08298, -0.873361], + ] + ) + res.columns = [list("ABC"), list("abc")] + res.columns.names = ["CAP", "low"] + return res + + +@pytest.fixture +def var_name(): + return "var" + + +@pytest.fixture +def value_name(): + return "val" + + +class TestMelt: + def test_top_level_method(self, df): + result = melt(df) + assert result.columns.tolist() == ["variable", "value"] + + def test_method_signatures(self, df, df1, var_name, value_name): + tm.assert_frame_equal(df.melt(), melt(df)) + + tm.assert_frame_equal( + df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]), + melt(df, id_vars=["id1", "id2"], value_vars=["A", "B"]), + ) + + tm.assert_frame_equal( + df.melt(var_name=var_name, value_name=value_name), + melt(df, var_name=var_name, value_name=value_name), + ) + + tm.assert_frame_equal(df1.melt(col_level=0), melt(df1, col_level=0)) + + def test_default_col_names(self, df): + result = df.melt() + assert result.columns.tolist() == ["variable", "value"] + + result1 = df.melt(id_vars=["id1"]) + assert result1.columns.tolist() == 
["id1", "variable", "value"] + + result2 = df.melt(id_vars=["id1", "id2"]) + assert result2.columns.tolist() == ["id1", "id2", "variable", "value"] + + def test_value_vars(self, df): + result3 = df.melt(id_vars=["id1", "id2"], value_vars="A") + assert len(result3) == 10 + + result4 = df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]) + expected4 = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + "variable": ["A"] * 10 + ["B"] * 10, + "value": (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", "variable", "value"], + ) + tm.assert_frame_equal(result4, expected4) + + @pytest.mark.parametrize("type_", (tuple, list, np.array)) + def test_value_vars_types(self, type_, df): + # GH 15348 + expected = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + "variable": ["A"] * 10 + ["B"] * 10, + "value": (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", "variable", "value"], + ) + result = df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B"))) + tm.assert_frame_equal(result, expected) + + def test_vars_work_with_multiindex(self, df1): + expected = DataFrame( + { + ("A", "a"): df1[("A", "a")], + "CAP": ["B"] * len(df1), + "low": ["b"] * len(df1), + "value": df1[("B", "b")], + }, + columns=[("A", "a"), "CAP", "low", "value"], + ) + + result = df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "id_vars, value_vars, col_level, expected", + [ + ( + ["A"], + ["B"], + 0, + DataFrame( + { + "A": {0: 1.067683, 1: -1.321405, 2: -0.807333}, + "CAP": {0: "B", 1: "B", 2: "B"}, + "value": {0: -1.110463, 1: 0.368915, 2: 0.08298}, + } + ), + ), + ( + ["a"], + ["b"], + 1, + DataFrame( + { + "a": {0: 1.067683, 1: -1.321405, 2: -0.807333}, + "low": {0: "b", 1: "b", 2: "b"}, + "value": {0: -1.110463, 1: 0.368915, 2: 0.08298}, + } + ), + ), + ], + ) + def test_single_vars_work_with_multiindex( + self, id_vars, value_vars, col_level, expected, df1 + ): + result = df1.melt(id_vars, value_vars, col_level=col_level) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "id_vars, value_vars", + [ + [("A", "a"), [("B", "b")]], + [[("A", "a")], ("B", "b")], + [("A", "a"), ("B", "b")], + ], + ) + def test_tuple_vars_fail_with_multiindex(self, id_vars, value_vars, df1): + # melt should fail with an informative error message if + # the columns have a MultiIndex and a tuple is passed + # for id_vars or value_vars. 
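+ # each parametrized case passes at least one bare tuple, which must raise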
+ msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex" + with pytest.raises(ValueError, match=msg): + df1.melt(id_vars=id_vars, value_vars=value_vars) + + def test_custom_var_name(self, df, var_name): + result5 = df.melt(var_name=var_name) + assert result5.columns.tolist() == ["var", "value"] + + result6 = df.melt(id_vars=["id1"], var_name=var_name) + assert result6.columns.tolist() == ["id1", "var", "value"] + + result7 = df.melt(id_vars=["id1", "id2"], var_name=var_name) + assert result7.columns.tolist() == ["id1", "id2", "var", "value"] + + result8 = df.melt(id_vars=["id1", "id2"], value_vars="A", var_name=var_name) + assert result8.columns.tolist() == ["id1", "id2", "var", "value"] + + result9 = df.melt( + id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=var_name + ) + expected9 = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + var_name: ["A"] * 10 + ["B"] * 10, + "value": (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", var_name, "value"], + ) + tm.assert_frame_equal(result9, expected9) + + def test_custom_value_name(self, df, value_name): + result10 = df.melt(value_name=value_name) + assert result10.columns.tolist() == ["variable", "val"] + + result11 = df.melt(id_vars=["id1"], value_name=value_name) + assert result11.columns.tolist() == ["id1", "variable", "val"] + + result12 = df.melt(id_vars=["id1", "id2"], value_name=value_name) + assert result12.columns.tolist() == ["id1", "id2", "variable", "val"] + + result13 = df.melt( + id_vars=["id1", "id2"], value_vars="A", value_name=value_name + ) + assert result13.columns.tolist() == ["id1", "id2", "variable", "val"] + + result14 = df.melt( + id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=value_name + ) + expected14 = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + "variable": ["A"] * 10 + ["B"] * 10, + value_name: (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", "variable", value_name], + ) + tm.assert_frame_equal(result14, expected14) + + def test_custom_var_and_value_name(self, df, value_name, var_name): + result15 = df.melt(var_name=var_name, value_name=value_name) + assert result15.columns.tolist() == ["var", "val"] + + result16 = df.melt(id_vars=["id1"], var_name=var_name, value_name=value_name) + assert result16.columns.tolist() == ["id1", "var", "val"] + + result17 = df.melt( + id_vars=["id1", "id2"], var_name=var_name, value_name=value_name + ) + assert result17.columns.tolist() == ["id1", "id2", "var", "val"] + + result18 = df.melt( + id_vars=["id1", "id2"], + value_vars="A", + var_name=var_name, + value_name=value_name, + ) + assert result18.columns.tolist() == ["id1", "id2", "var", "val"] + + result19 = df.melt( + id_vars=["id1", "id2"], + value_vars=["A", "B"], + var_name=var_name, + value_name=value_name, + ) + expected19 = DataFrame( + { + "id1": df["id1"].tolist() * 2, + "id2": df["id2"].tolist() * 2, + var_name: ["A"] * 10 + ["B"] * 10, + value_name: (df["A"].tolist() + df["B"].tolist()), + }, + columns=["id1", "id2", var_name, value_name], + ) + tm.assert_frame_equal(result19, expected19) + + df20 = df.copy() + df20.columns.name = "foo" + result20 = df20.melt() + assert result20.columns.tolist() == ["foo", "value"] + + @pytest.mark.parametrize("col_level", [0, "CAP"]) + def test_col_level(self, col_level, df1): + res = df1.melt(col_level=col_level) + assert res.columns.tolist() == ["CAP", "value"] + + def test_multiindex(self, df1): + res = df1.melt() + assert 
res.columns.tolist() == ["CAP", "low", "value"] + + @pytest.mark.parametrize( + "col", + [ + pd.Series(date_range("2010", periods=5, tz="US/Pacific")), + pd.Series(["a", "b", "c", "a", "d"], dtype="category"), + pd.Series([0, 1, 0, 0, 0]), + ], + ) + def test_pandas_dtypes(self, col): + # GH 15785 + df = DataFrame( + {"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col} + ) + expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True) + result = melt( + df, id_vars=["klass", "col"], var_name="attribute", value_name="value" + ) + expected = DataFrame( + { + 0: list(range(5)) * 2, + 1: pd.concat([col] * 2, ignore_index=True), + 2: ["attr1"] * 5 + ["attr2"] * 5, + 3: expected_value, + } + ) + expected.columns = ["klass", "col", "attribute", "value"] + tm.assert_frame_equal(result, expected) + + def test_preserve_category(self): + # GH 15853 + data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])}) + result = melt(data, ["B"], ["A"]) + expected = DataFrame( + {"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]} + ) + + tm.assert_frame_equal(result, expected) + + def test_melt_missing_columns_raises(self): + # GH-23575 + # This test is to ensure that pandas raises an error if melting is + # attempted with column names absent from the dataframe + + # Generate data + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd") + ) + + # Try to melt with missing `value_vars` column name + msg = "The following id_vars or value_vars are not present in the DataFrame:" + with pytest.raises(KeyError, match=msg): + df.melt(["a", "b"], ["C", "d"]) + + # Try to melt with missing `id_vars` column name + with pytest.raises(KeyError, match=msg): + df.melt(["A", "b"], ["c", "d"]) + + # Multiple missing + with pytest.raises( + KeyError, + match=msg, + ): + df.melt(["a", "b", "not_here", "or_there"], ["c", "d"]) + + # Multiindex melt fails if column is missing from multilevel melt + multi = df.copy() + multi.columns = [list("ABCD"), list("abcd")] + with pytest.raises(KeyError, match=msg): + multi.melt([("E", "a")], [("B", "b")]) + # Multiindex fails if column is missing from single level melt + with pytest.raises(KeyError, match=msg): + multi.melt(["A"], ["F"], col_level=0) + + def test_melt_mixed_int_str_id_vars(self): + # GH 29718 + df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]}) + result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"]) + expected = DataFrame( + {0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]} + ) + tm.assert_frame_equal(result, expected) + + def test_melt_mixed_int_str_value_vars(self): + # GH 29718 + df = DataFrame({0: ["foo"], "a": ["bar"]}) + result = melt(df, value_vars=[0, "a"]) + expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]}) + tm.assert_frame_equal(result, expected) + + def test_ignore_index(self): + # GH 17440 + df = DataFrame({"foo": [0], "bar": [1]}, index=["first"]) + result = melt(df, ignore_index=False) + expected = DataFrame( + {"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"] + ) + tm.assert_frame_equal(result, expected) + + def test_ignore_multiindex(self): + # GH 17440 + index = pd.MultiIndex.from_tuples( + [("first", "second"), ("first", "third")], names=["baz", "foobar"] + ) + df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index) + result = melt(df, ignore_index=False) + + expected_index = pd.MultiIndex.from_tuples( + [("first", "second"), ("first", "third")] * 2, names=["baz", 
"foobar"] + ) + expected = DataFrame( + {"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]}, + index=expected_index, + ) + + tm.assert_frame_equal(result, expected) + + def test_ignore_index_name_and_type(self): + # GH 17440 + index = Index(["foo", "bar"], dtype="category", name="baz") + df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index) + result = melt(df, ignore_index=False) + + expected_index = Index(["foo", "bar"] * 2, dtype="category", name="baz") + expected = DataFrame( + {"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]}, + index=expected_index, + ) + + tm.assert_frame_equal(result, expected) + + def test_melt_with_duplicate_columns(self): + # GH#41951 + df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"]) + result = df.melt(id_vars=["a"], value_vars=["b"]) + expected = DataFrame( + [["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"] + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", ["Int8", "Int64"]) + def test_melt_ea_dtype(self, dtype): + # GH#41570 + df = DataFrame( + { + "a": pd.Series([1, 2], dtype="Int8"), + "b": pd.Series([3, 4], dtype=dtype), + } + ) + result = df.melt() + expected = DataFrame( + { + "variable": ["a", "a", "b", "b"], + "value": pd.Series([1, 2, 3, 4], dtype=dtype), + } + ) + tm.assert_frame_equal(result, expected) + + def test_melt_ea_columns(self): + # GH 54297 + df = DataFrame( + { + "A": {0: "a", 1: "b", 2: "c"}, + "B": {0: 1, 1: 3, 2: 5}, + "C": {0: 2, 1: 4, 2: 6}, + } + ) + df.columns = df.columns.astype("string[python]") + result = df.melt(id_vars=["A"], value_vars=["B"]) + expected = DataFrame( + { + "A": list("abc"), + "variable": pd.Series(["B"] * 3, dtype="string[python]"), + "value": [1, 3, 5], + } + ) + tm.assert_frame_equal(result, expected) + + def test_melt_preserves_datetime(self): + df = DataFrame( + data=[ + { + "type": "A0", + "start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"), + "end_date": pd.Timestamp("2023/03/10", tz="Asia/Tokyo"), + }, + { + "type": "A1", + "start_date": pd.Timestamp("2023/03/01", tz="Asia/Tokyo"), + "end_date": pd.Timestamp("2023/03/11", tz="Asia/Tokyo"), + }, + ], + index=["aaaa", "bbbb"], + ) + result = df.melt( + id_vars=["type"], + value_vars=["start_date", "end_date"], + var_name="start/end", + value_name="date", + ) + expected = DataFrame( + { + "type": {0: "A0", 1: "A1", 2: "A0", 3: "A1"}, + "start/end": { + 0: "start_date", + 1: "start_date", + 2: "end_date", + 3: "end_date", + }, + "date": { + 0: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"), + 1: pd.Timestamp("2023-03-01 00:00:00+0900", tz="Asia/Tokyo"), + 2: pd.Timestamp("2023-03-10 00:00:00+0900", tz="Asia/Tokyo"), + 3: pd.Timestamp("2023-03-11 00:00:00+0900", tz="Asia/Tokyo"), + }, + } + ) + tm.assert_frame_equal(result, expected) + + def test_melt_allows_non_scalar_id_vars(self): + df = DataFrame( + data={"a": [1, 2, 3], "b": [4, 5, 6]}, + index=["11", "22", "33"], + ) + result = df.melt( + id_vars="a", + var_name=0, + value_name=1, + ) + expected = DataFrame({"a": [1, 2, 3], 0: ["b"] * 3, 1: [4, 5, 6]}) + tm.assert_frame_equal(result, expected) + + def test_melt_allows_non_string_var_name(self): + df = DataFrame( + data={"a": [1, 2, 3], "b": [4, 5, 6]}, + index=["11", "22", "33"], + ) + result = df.melt( + id_vars=["a"], + var_name=0, + value_name=1, + ) + expected = DataFrame({"a": [1, 2, 3], 0: ["b"] * 3, 1: [4, 5, 6]}) + tm.assert_frame_equal(result, expected) + + def test_melt_non_scalar_var_name_raises(self): + df = DataFrame( + data={"a": [1, 2, 3], 
"b": [4, 5, 6]}, + index=["11", "22", "33"], + ) + with pytest.raises(ValueError, match=r".* must be a scalar."): + df.melt(id_vars=["a"], var_name=[1, 2]) + + +class TestLreshape: + def test_pairs(self): + data = { + "birthdt": [ + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + ], + "birthwt": [1766, 3301, 1454, 3139, 4133], + "id": [101, 102, 103, 104, 105], + "sex": ["Male", "Female", "Female", "Female", "Female"], + "visitdt1": [ + "11jan2009", + "22dec2008", + "04jan2009", + "29dec2008", + "20jan2009", + ], + "visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"], + "visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"], + "wt1": [1823, 3338, 1549, 3298, 4306], + "wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0], + "wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0], + } + + df = DataFrame(data) + + spec = { + "visitdt": [f"visitdt{i:d}" for i in range(1, 4)], + "wt": [f"wt{i:d}" for i in range(1, 4)], + } + result = lreshape(df, spec) + + exp_data = { + "birthdt": [ + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + "08jan2009", + "30dec2008", + "21dec2008", + "11jan2009", + "08jan2009", + "21dec2008", + "11jan2009", + ], + "birthwt": [ + 1766, + 3301, + 1454, + 3139, + 4133, + 1766, + 1454, + 3139, + 4133, + 1766, + 3139, + 4133, + ], + "id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105], + "sex": [ + "Male", + "Female", + "Female", + "Female", + "Female", + "Male", + "Female", + "Female", + "Female", + "Male", + "Female", + "Female", + ], + "visitdt": [ + "11jan2009", + "22dec2008", + "04jan2009", + "29dec2008", + "20jan2009", + "21jan2009", + "22jan2009", + "31dec2008", + "03feb2009", + "05feb2009", + "02jan2009", + "15feb2009", + ], + "wt": [ + 1823.0, + 3338.0, + 1549.0, + 3298.0, + 4306.0, + 2011.0, + 1892.0, + 3338.0, + 4575.0, + 2293.0, + 3377.0, + 4805.0, + ], + } + exp = DataFrame(exp_data, columns=result.columns) + tm.assert_frame_equal(result, exp) + + result = lreshape(df, spec, dropna=False) + exp_data = { + "birthdt": [ + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + "08jan2009", + "20dec2008", + "30dec2008", + "21dec2008", + "11jan2009", + ], + "birthwt": [ + 1766, + 3301, + 1454, + 3139, + 4133, + 1766, + 3301, + 1454, + 3139, + 4133, + 1766, + 3301, + 1454, + 3139, + 4133, + ], + "id": [ + 101, + 102, + 103, + 104, + 105, + 101, + 102, + 103, + 104, + 105, + 101, + 102, + 103, + 104, + 105, + ], + "sex": [ + "Male", + "Female", + "Female", + "Female", + "Female", + "Male", + "Female", + "Female", + "Female", + "Female", + "Male", + "Female", + "Female", + "Female", + "Female", + ], + "visitdt": [ + "11jan2009", + "22dec2008", + "04jan2009", + "29dec2008", + "20jan2009", + "21jan2009", + np.nan, + "22jan2009", + "31dec2008", + "03feb2009", + "05feb2009", + np.nan, + np.nan, + "02jan2009", + "15feb2009", + ], + "wt": [ + 1823.0, + 3338.0, + 1549.0, + 3298.0, + 4306.0, + 2011.0, + np.nan, + 1892.0, + 3338.0, + 4575.0, + 2293.0, + np.nan, + np.nan, + 3377.0, + 4805.0, + ], + } + exp = DataFrame(exp_data, columns=result.columns) + tm.assert_frame_equal(result, exp) + + spec = { + "visitdt": [f"visitdt{i:d}" for i in range(1, 3)], + "wt": [f"wt{i:d}" for i in range(1, 4)], + } + msg = "All column lists must be same length" + with pytest.raises(ValueError, match=msg): + lreshape(df, spec) + + +class TestWideToLong: + def test_simple(self): + x = 
np.random.default_rng(2).standard_normal(3) + df = DataFrame( + { + "A1970": {0: "a", 1: "b", 2: "c"}, + "A1980": {0: "d", 1: "e", 2: "f"}, + "B1970": {0: 2.5, 1: 1.2, 2: 0.7}, + "B1980": {0: 3.2, 1: 1.3, 2: 0.1}, + "X": dict(zip(range(3), x)), + } + ) + df["id"] = df.index + exp_data = { + "X": x.tolist() + x.tolist(), + "A": ["a", "b", "c", "d", "e", "f"], + "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], + "year": [1970, 1970, 1970, 1980, 1980, 1980], + "id": [0, 1, 2, 0, 1, 2], + } + expected = DataFrame(exp_data) + expected = expected.set_index(["id", "year"])[["X", "A", "B"]] + result = wide_to_long(df, ["A", "B"], i="id", j="year") + tm.assert_frame_equal(result, expected) + + def test_stubs(self): + # GH9204 wide_to_long call should not modify 'stubs' list + df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]]) + df.columns = ["id", "inc1", "inc2", "edu1", "edu2"] + stubs = ["inc", "edu"] + + wide_to_long(df, stubs, i="id", j="age") + + assert stubs == ["inc", "edu"] + + def test_separating_character(self): + # GH14779 + + x = np.random.default_rng(2).standard_normal(3) + df = DataFrame( + { + "A.1970": {0: "a", 1: "b", 2: "c"}, + "A.1980": {0: "d", 1: "e", 2: "f"}, + "B.1970": {0: 2.5, 1: 1.2, 2: 0.7}, + "B.1980": {0: 3.2, 1: 1.3, 2: 0.1}, + "X": dict(zip(range(3), x)), + } + ) + df["id"] = df.index + exp_data = { + "X": x.tolist() + x.tolist(), + "A": ["a", "b", "c", "d", "e", "f"], + "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], + "year": [1970, 1970, 1970, 1980, 1980, 1980], + "id": [0, 1, 2, 0, 1, 2], + } + expected = DataFrame(exp_data) + expected = expected.set_index(["id", "year"])[["X", "A", "B"]] + result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".") + tm.assert_frame_equal(result, expected) + + def test_escapable_characters(self): + x = np.random.default_rng(2).standard_normal(3) + df = DataFrame( + { + "A(quarterly)1970": {0: "a", 1: "b", 2: "c"}, + "A(quarterly)1980": {0: "d", 1: "e", 2: "f"}, + "B(quarterly)1970": {0: 2.5, 1: 1.2, 2: 0.7}, + "B(quarterly)1980": {0: 3.2, 1: 1.3, 2: 0.1}, + "X": dict(zip(range(3), x)), + } + ) + df["id"] = df.index + exp_data = { + "X": x.tolist() + x.tolist(), + "A(quarterly)": ["a", "b", "c", "d", "e", "f"], + "B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], + "year": [1970, 1970, 1970, 1980, 1980, 1980], + "id": [0, 1, 2, 0, 1, 2], + } + expected = DataFrame(exp_data) + expected = expected.set_index(["id", "year"])[ + ["X", "A(quarterly)", "B(quarterly)"] + ] + result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"], i="id", j="year") + tm.assert_frame_equal(result, expected) + + def test_unbalanced(self): + # test that we can have a varying amount of time variables + df = DataFrame( + { + "A2010": [1.0, 2.0], + "A2011": [3.0, 4.0], + "B2010": [5.0, 6.0], + "X": ["X1", "X2"], + } + ) + df["id"] = df.index + exp_data = { + "X": ["X1", "X2", "X1", "X2"], + "A": [1.0, 2.0, 3.0, 4.0], + "B": [5.0, 6.0, np.nan, np.nan], + "id": [0, 1, 0, 1], + "year": [2010, 2010, 2011, 2011], + } + expected = DataFrame(exp_data) + expected = expected.set_index(["id", "year"])[["X", "A", "B"]] + result = wide_to_long(df, ["A", "B"], i="id", j="year") + tm.assert_frame_equal(result, expected) + + def test_character_overlap(self): + # Test we handle overlapping characters in both id_vars and value_vars + df = DataFrame( + { + "A11": ["a11", "a22", "a33"], + "A12": ["a21", "a22", "a23"], + "B11": ["b11", "b12", "b13"], + "B12": ["b21", "b22", "b23"], + "BB11": [1, 2, 3], + "BB12": [4, 5, 6], + "BBBX": [91, 92, 93], + "BBBZ": [91, 92, 93], + } + ) + df["id"] = 
df.index + expected = DataFrame( + { + "BBBX": [91, 92, 93, 91, 92, 93], + "BBBZ": [91, 92, 93, 91, 92, 93], + "A": ["a11", "a22", "a33", "a21", "a22", "a23"], + "B": ["b11", "b12", "b13", "b21", "b22", "b23"], + "BB": [1, 2, 3, 4, 5, 6], + "id": [0, 1, 2, 0, 1, 2], + "year": [11, 11, 11, 12, 12, 12], + } + ) + expected = expected.set_index(["id", "year"])[["BBBX", "BBBZ", "A", "B", "BB"]] + result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year") + tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) + + def test_invalid_separator(self): + # if an invalid separator is supplied, an empty data frame is returned + sep = "nope!" + df = DataFrame( + { + "A2010": [1.0, 2.0], + "A2011": [3.0, 4.0], + "B2010": [5.0, 6.0], + "X": ["X1", "X2"], + } + ) + df["id"] = df.index + exp_data = { + "X": "", + "A2010": [], + "A2011": [], + "B2010": [], + "id": [], + "year": [], + "A": [], + "B": [], + } + expected = DataFrame(exp_data).astype({"year": np.int64}) + expected = expected.set_index(["id", "year"])[ + ["X", "A2010", "A2011", "B2010", "A", "B"] + ] + expected.index = expected.index.set_levels([0, 1], level=0) + result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep) + tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) + + def test_num_string_disambiguation(self): + # Test that we can disambiguate numeric value_vars from + # string value_vars + df = DataFrame( + { + "A11": ["a11", "a22", "a33"], + "A12": ["a21", "a22", "a23"], + "B11": ["b11", "b12", "b13"], + "B12": ["b21", "b22", "b23"], + "BB11": [1, 2, 3], + "BB12": [4, 5, 6], + "Arating": [91, 92, 93], + "Arating_old": [91, 92, 93], + } + ) + df["id"] = df.index + expected = DataFrame( + { + "Arating": [91, 92, 93, 91, 92, 93], + "Arating_old": [91, 92, 93, 91, 92, 93], + "A": ["a11", "a22", "a33", "a21", "a22", "a23"], + "B": ["b11", "b12", "b13", "b21", "b22", "b23"], + "BB": [1, 2, 3, 4, 5, 6], + "id": [0, 1, 2, 0, 1, 2], + "year": [11, 11, 11, 12, 12, 12], + } + ) + expected = expected.set_index(["id", "year"])[ + ["Arating", "Arating_old", "A", "B", "BB"] + ] + result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year") + tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) + + def test_invalid_suffixtype(self): + # If all stub names end with a string, but a numeric suffix is + # assumed, an empty data frame is returned + df = DataFrame( + { + "Aone": [1.0, 2.0], + "Atwo": [3.0, 4.0], + "Bone": [5.0, 6.0], + "X": ["X1", "X2"], + } + ) + df["id"] = df.index + exp_data = { + "X": "", + "Aone": [], + "Atwo": [], + "Bone": [], + "id": [], + "year": [], + "A": [], + "B": [], + } + expected = DataFrame(exp_data).astype({"year": np.int64}) + + expected = expected.set_index(["id", "year"]) + expected.index = expected.index.set_levels([0, 1], level=0) + result = wide_to_long(df, ["A", "B"], i="id", j="year") + tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) + + def test_multiple_id_columns(self): + # Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm + df = DataFrame( + { + "famid": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "birth": [1, 2, 3, 1, 2, 3, 1, 2, 3], + "ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], + "ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9], + } + ) + expected = DataFrame( + { + "ht": [ + 2.8, + 3.4, + 2.9, + 3.8, + 2.2, + 2.9, + 2.0, + 3.2, + 1.8, + 2.8, + 1.9, + 2.4, + 2.2, + 3.3, + 2.3, + 3.4, + 2.1, + 2.9, + ], + "famid": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3], + "birth": [1, 
1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3], + "age": [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2], + } + ) + expected = expected.set_index(["famid", "birth", "age"])[["ht"]] + result = wide_to_long(df, "ht", i=["famid", "birth"], j="age") + tm.assert_frame_equal(result, expected) + + def test_non_unique_idvars(self): + # GH16382 + # Raise an error message if non unique id vars (i) are passed + df = DataFrame( + {"A_A1": [1, 2, 3, 4, 5], "B_B1": [1, 2, 3, 4, 5], "x": [1, 1, 1, 1, 1]} + ) + msg = "the id variables need to uniquely identify each row" + with pytest.raises(ValueError, match=msg): + wide_to_long(df, ["A_A", "B_B"], i="x", j="colname") + + def test_cast_j_int(self): + df = DataFrame( + { + "actor_1": ["CCH Pounder", "Johnny Depp", "Christoph Waltz"], + "actor_2": ["Joel David Moore", "Orlando Bloom", "Rory Kinnear"], + "actor_fb_likes_1": [1000.0, 40000.0, 11000.0], + "actor_fb_likes_2": [936.0, 5000.0, 393.0], + "title": ["Avatar", "Pirates of the Caribbean", "Spectre"], + } + ) + + expected = DataFrame( + { + "actor": [ + "CCH Pounder", + "Johnny Depp", + "Christoph Waltz", + "Joel David Moore", + "Orlando Bloom", + "Rory Kinnear", + ], + "actor_fb_likes": [1000.0, 40000.0, 11000.0, 936.0, 5000.0, 393.0], + "num": [1, 1, 1, 2, 2, 2], + "title": [ + "Avatar", + "Pirates of the Caribbean", + "Spectre", + "Avatar", + "Pirates of the Caribbean", + "Spectre", + ], + } + ).set_index(["title", "num"]) + result = wide_to_long( + df, ["actor", "actor_fb_likes"], i="title", j="num", sep="_" + ) + + tm.assert_frame_equal(result, expected) + + def test_identical_stubnames(self): + df = DataFrame( + { + "A2010": [1.0, 2.0], + "A2011": [3.0, 4.0], + "B2010": [5.0, 6.0], + "A": ["X1", "X2"], + } + ) + msg = "stubname can't be identical to a column name" + with pytest.raises(ValueError, match=msg): + wide_to_long(df, ["A", "B"], i="A", j="colname") + + def test_nonnumeric_suffix(self): + df = DataFrame( + { + "treatment_placebo": [1.0, 2.0], + "treatment_test": [3.0, 4.0], + "result_placebo": [5.0, 6.0], + "A": ["X1", "X2"], + } + ) + expected = DataFrame( + { + "A": ["X1", "X2", "X1", "X2"], + "colname": ["placebo", "placebo", "test", "test"], + "result": [5.0, 6.0, np.nan, np.nan], + "treatment": [1.0, 2.0, 3.0, 4.0], + } + ) + expected = expected.set_index(["A", "colname"]) + result = wide_to_long( + df, ["result", "treatment"], i="A", j="colname", suffix="[a-z]+", sep="_" + ) + tm.assert_frame_equal(result, expected) + + def test_mixed_type_suffix(self): + df = DataFrame( + { + "A": ["X1", "X2"], + "result_1": [0, 9], + "result_foo": [5.0, 6.0], + "treatment_1": [1.0, 2.0], + "treatment_foo": [3.0, 4.0], + } + ) + expected = DataFrame( + { + "A": ["X1", "X2", "X1", "X2"], + "colname": ["1", "1", "foo", "foo"], + "result": [0.0, 9.0, 5.0, 6.0], + "treatment": [1.0, 2.0, 3.0, 4.0], + } + ).set_index(["A", "colname"]) + result = wide_to_long( + df, ["result", "treatment"], i="A", j="colname", suffix=".+", sep="_" + ) + tm.assert_frame_equal(result, expected) + + def test_float_suffix(self): + df = DataFrame( + { + "treatment_1.1": [1.0, 2.0], + "treatment_2.1": [3.0, 4.0], + "result_1.2": [5.0, 6.0], + "result_1": [0, 9], + "A": ["X1", "X2"], + } + ) + expected = DataFrame( + { + "A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"], + "colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1], + "result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan], + "treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0], + } + ) + expected = expected.set_index(["A", 
"colname"]) + result = wide_to_long( + df, ["result", "treatment"], i="A", j="colname", suffix="[0-9.]+", sep="_" + ) + tm.assert_frame_equal(result, expected) + + def test_col_substring_of_stubname(self): + # GH22468 + # Don't raise ValueError when a column name is a substring + # of a stubname that's been passed as a string + wide_data = { + "node_id": {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}, + "A": {0: 0.80, 1: 0.0, 2: 0.25, 3: 1.0, 4: 0.81}, + "PA0": {0: 0.74, 1: 0.56, 2: 0.56, 3: 0.98, 4: 0.6}, + "PA1": {0: 0.77, 1: 0.64, 2: 0.52, 3: 0.98, 4: 0.67}, + "PA3": {0: 0.34, 1: 0.70, 2: 0.52, 3: 0.98, 4: 0.67}, + } + wide_df = DataFrame.from_dict(wide_data) + expected = wide_to_long(wide_df, stubnames=["PA"], i=["node_id", "A"], j="time") + result = wide_to_long(wide_df, stubnames="PA", i=["node_id", "A"], j="time") + tm.assert_frame_equal(result, expected) + + def test_raise_of_column_name_value(self): + # GH34731, enforced in 2.0 + # raise a ValueError if the resultant value column name matches + # a name in the dataframe already (default name is "value") + df = DataFrame({"col": list("ABC"), "value": range(10, 16, 2)}) + + with pytest.raises( + ValueError, match=re.escape("value_name (value) cannot match") + ): + df.melt(id_vars="value", value_name="value") + + @pytest.mark.parametrize("dtype", ["O", "string"]) + def test_missing_stubname(self, dtype): + # GH46044 + df = DataFrame({"id": ["1", "2"], "a-1": [100, 200], "a-2": [300, 400]}) + df = df.astype({"id": dtype}) + result = wide_to_long( + df, + stubnames=["a", "b"], + i="id", + j="num", + sep="-", + ) + index = Index( + [("1", 1), ("2", 1), ("1", 2), ("2", 2)], + name=("id", "num"), + ) + expected = DataFrame( + {"a": [100, 200, 300, 400], "b": [np.nan] * 4}, + index=index, + ) + new_level = expected.index.levels[0].astype(dtype) + expected.index = expected.index.set_levels(new_level, level=0) + tm.assert_frame_equal(result, expected) + + +def test_wide_to_long_pyarrow_string_columns(): + # GH 57066 + pytest.importorskip("pyarrow") + df = DataFrame( + { + "ID": {0: 1}, + "R_test1": {0: 1}, + "R_test2": {0: 1}, + "R_test3": {0: 2}, + "D": {0: 1}, + } + ) + df.columns = df.columns.astype("string[pyarrow_numpy]") + result = wide_to_long( + df, stubnames="R", i="ID", j="UNPIVOTED", sep="_", suffix=".*" + ) + expected = DataFrame( + [[1, 1], [1, 1], [1, 2]], + columns=Index(["D", "R"], dtype=object), + index=pd.MultiIndex.from_arrays( + [ + [1, 1, 1], + Index(["test1", "test2", "test3"], dtype="string[pyarrow_numpy]"), + ], + names=["ID", "UNPIVOTED"], + ), + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_qcut.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_qcut.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b19eef1106fd2f500fc74e0eb5157ca63cfeb7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_qcut.py @@ -0,0 +1,305 @@ +import os + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + DatetimeIndex, + Interval, + IntervalIndex, + NaT, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, + cut, + date_range, + isna, + qcut, + timedelta_range, +) +import pandas._testing as tm +from pandas.api.types import CategoricalDtype + +from pandas.tseries.offsets import Day + + +def test_qcut(): + arr = np.random.default_rng(2).standard_normal(1000) + + # We store the bins as Index that have been + # rounded to comparisons are a bit tricky. 
+ labels, _ = qcut(arr, 4, retbins=True) + ex_bins = np.quantile(arr, [0, 0.25, 0.5, 0.75, 1.0]) + + result = labels.categories.left.values + assert np.allclose(result, ex_bins[:-1], atol=1e-2) + + result = labels.categories.right.values + assert np.allclose(result, ex_bins[1:], atol=1e-2) + + ex_levels = cut(arr, ex_bins, include_lowest=True) + tm.assert_categorical_equal(labels, ex_levels) + + +def test_qcut_bounds(): + arr = np.random.default_rng(2).standard_normal(1000) + + factor = qcut(arr, 10, labels=False) + assert len(np.unique(factor)) == 10 + + +def test_qcut_specify_quantiles(): + arr = np.random.default_rng(2).standard_normal(100) + factor = qcut(arr, [0, 0.25, 0.5, 0.75, 1.0]) + + expected = qcut(arr, 4) + tm.assert_categorical_equal(factor, expected) + + +def test_qcut_all_bins_same(): + with pytest.raises(ValueError, match="edges.*unique"): + qcut([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 3) + + +def test_qcut_include_lowest(): + values = np.arange(10) + ii = qcut(values, 4) + + ex_levels = IntervalIndex( + [ + Interval(-0.001, 2.25), + Interval(2.25, 4.5), + Interval(4.5, 6.75), + Interval(6.75, 9), + ] + ) + tm.assert_index_equal(ii.categories, ex_levels) + + +def test_qcut_nas(): + arr = np.random.default_rng(2).standard_normal(100) + arr[:20] = np.nan + + result = qcut(arr, 4) + assert isna(result[:20]).all() + + +def test_qcut_index(): + result = qcut([0, 2], 2) + intervals = [Interval(-0.001, 1), Interval(1, 2)] + + expected = Categorical(intervals, ordered=True) + tm.assert_categorical_equal(result, expected) + + +def test_qcut_binning_issues(datapath): + # see gh-1978, gh-1979 + cut_file = datapath(os.path.join("reshape", "data", "cut_data.csv")) + arr = np.loadtxt(cut_file) + result = qcut(arr, 20) + + starts = [] + ends = [] + + for lev in np.unique(result): + s = lev.left + e = lev.right + assert s != e + + starts.append(float(s)) + ends.append(float(e)) + + for (sp, sn), (ep, en) in zip( + zip(starts[:-1], starts[1:]), zip(ends[:-1], ends[1:]) + ): + assert sp < sn + assert ep < en + assert ep <= sn + + +def test_qcut_return_intervals(): + ser = Series([0, 1, 2, 3, 4, 5, 6, 7, 8]) + res = qcut(ser, [0, 0.333, 0.666, 1]) + + exp_levels = np.array( + [Interval(-0.001, 2.664), Interval(2.664, 5.328), Interval(5.328, 8)] + ) + exp = Series(exp_levels.take([0, 0, 0, 1, 1, 1, 2, 2, 2])).astype( + CategoricalDtype(ordered=True) + ) + tm.assert_series_equal(res, exp) + + +@pytest.mark.parametrize("labels", ["foo", 1, True]) +def test_qcut_incorrect_labels(labels): + # GH 13318 + values = range(5) + msg = "Bin labels must either be False, None or passed in as a list-like argument" + with pytest.raises(ValueError, match=msg): + qcut(values, 4, labels=labels) + + +@pytest.mark.parametrize("labels", [["a", "b", "c"], list(range(3))]) +def test_qcut_wrong_length_labels(labels): + # GH 13318 + values = range(10) + msg = "Bin labels must be one fewer than the number of bin edges" + with pytest.raises(ValueError, match=msg): + qcut(values, 4, labels=labels) + + +@pytest.mark.parametrize( + "labels, expected", + [ + (["a", "b", "c"], Categorical(["a", "b", "c"], ordered=True)), + (list(range(3)), Categorical([0, 1, 2], ordered=True)), + ], +) +def test_qcut_list_like_labels(labels, expected): + # GH 13318 + values = range(3) + result = qcut(values, 3, labels=labels) + tm.assert_categorical_equal(result, expected) + + +@pytest.mark.parametrize( + "kwargs,msg", + [ + ({"duplicates": "drop"}, None), + ({}, "Bin edges must be unique"), + ({"duplicates": "raise"}, "Bin edges must be unique"), 
+ ({"duplicates": "foo"}, "invalid value for 'duplicates' parameter"), + ], +) +def test_qcut_duplicates_bin(kwargs, msg): + # see gh-7751 + values = [0, 0, 0, 0, 1, 2, 3] + + if msg is not None: + with pytest.raises(ValueError, match=msg): + qcut(values, 3, **kwargs) + else: + result = qcut(values, 3, **kwargs) + expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)]) + tm.assert_index_equal(result.categories, expected) + + +@pytest.mark.parametrize( + "data,start,end", [(9.0, 8.999, 9.0), (0.0, -0.001, 0.0), (-9.0, -9.001, -9.0)] +) +@pytest.mark.parametrize("length", [1, 2]) +@pytest.mark.parametrize("labels", [None, False]) +def test_single_quantile(data, start, end, length, labels): + # see gh-15431 + ser = Series([data] * length) + result = qcut(ser, 1, labels=labels) + + if labels is None: + intervals = IntervalIndex([Interval(start, end)] * length, closed="right") + expected = Series(intervals).astype(CategoricalDtype(ordered=True)) + else: + expected = Series([0] * length, dtype=np.intp) + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "ser", + [ + Series(DatetimeIndex(["20180101", NaT, "20180103"])), + Series(TimedeltaIndex(["0 days", NaT, "2 days"])), + ], + ids=lambda x: str(x.dtype), +) +def test_qcut_nat(ser, unit): + # see gh-19768 + ser = ser.dt.as_unit(unit) + td = Timedelta(1, unit=unit).as_unit(unit) + + left = Series([ser[0] - td, np.nan, ser[2] - Day()], dtype=ser.dtype) + right = Series([ser[2] - Day(), np.nan, ser[2]], dtype=ser.dtype) + intervals = IntervalIndex.from_arrays(left, right) + expected = Series(Categorical(intervals, ordered=True)) + + result = qcut(ser, 2) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("bins", [3, np.linspace(0, 1, 4)]) +def test_datetime_tz_qcut(bins): + # see gh-19872 + tz = "US/Eastern" + ser = Series(date_range("20130101", periods=3, tz=tz)) + + result = qcut(ser, bins) + expected = Series( + IntervalIndex( + [ + Interval( + Timestamp("2012-12-31 23:59:59.999999999", tz=tz), + Timestamp("2013-01-01 16:00:00", tz=tz), + ), + Interval( + Timestamp("2013-01-01 16:00:00", tz=tz), + Timestamp("2013-01-02 08:00:00", tz=tz), + ), + Interval( + Timestamp("2013-01-02 08:00:00", tz=tz), + Timestamp("2013-01-03 00:00:00", tz=tz), + ), + ] + ) + ).astype(CategoricalDtype(ordered=True)) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "arg,expected_bins", + [ + [ + timedelta_range("1day", periods=3), + TimedeltaIndex(["1 days", "2 days", "3 days"]), + ], + [ + date_range("20180101", periods=3), + DatetimeIndex(["2018-01-01", "2018-01-02", "2018-01-03"]), + ], + ], +) +def test_date_like_qcut_bins(arg, expected_bins): + # see gh-19891 + ser = Series(arg) + result, result_bins = qcut(ser, 2, retbins=True) + tm.assert_index_equal(result_bins, expected_bins) + + +@pytest.mark.parametrize("bins", [6, 7]) +@pytest.mark.parametrize( + "box, compare", + [ + (Series, tm.assert_series_equal), + (np.array, tm.assert_categorical_equal), + (list, tm.assert_equal), + ], +) +def test_qcut_bool_coercion_to_int(bins, box, compare): + # issue 20303 + data_expected = box([0, 1, 1, 0, 1] * 10) + data_result = box([False, True, True, False, True] * 10) + expected = qcut(data_expected, bins, duplicates="drop") + result = qcut(data_result, bins, duplicates="drop") + compare(result, expected) + + +@pytest.mark.parametrize("q", [2, 5, 10]) +def test_qcut_nullable_integer(q, any_numeric_ea_dtype): + arr = pd.array(np.arange(100), dtype=any_numeric_ea_dtype) + arr[::2] = pd.NA + + 
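# casting the masked array to float turns pd.NA into NaN, so results must match +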
result = qcut(arr, q) + expected = qcut(arr.astype(float), q) + + tm.assert_categorical_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_util.py b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..4d0be7464cb3d97697323faef5b4e7cd0d9b6df0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/reshape/test_util.py @@ -0,0 +1,79 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + date_range, +) +import pandas._testing as tm +from pandas.core.reshape.util import cartesian_product + + +class TestCartesianProduct: + def test_simple(self): + x, y = list("ABC"), [1, 22] + result1, result2 = cartesian_product([x, y]) + expected1 = np.array(["A", "A", "B", "B", "C", "C"]) + expected2 = np.array([1, 22, 1, 22, 1, 22]) + tm.assert_numpy_array_equal(result1, expected1) + tm.assert_numpy_array_equal(result2, expected2) + + def test_datetimeindex(self): + # regression test for GitHub issue #6439 + # make sure that the ordering on datetimeindex is consistent + x = date_range("2000-01-01", periods=2) + result1, result2 = (Index(y).day for y in cartesian_product([x, x])) + expected1 = Index([1, 1, 2, 2], dtype=np.int32) + expected2 = Index([1, 2, 1, 2], dtype=np.int32) + tm.assert_index_equal(result1, expected1) + tm.assert_index_equal(result2, expected2) + + def test_tzaware_retained(self): + x = date_range("2000-01-01", periods=2, tz="US/Pacific") + y = np.array([3, 4]) + result1, result2 = cartesian_product([x, y]) + + expected = x.repeat(2) + tm.assert_index_equal(result1, expected) + + def test_tzaware_retained_categorical(self): + x = date_range("2000-01-01", periods=2, tz="US/Pacific").astype("category") + y = np.array([3, 4]) + result1, result2 = cartesian_product([x, y]) + + expected = x.repeat(2) + tm.assert_index_equal(result1, expected) + + @pytest.mark.parametrize("x, y", [[[], []], [[0, 1], []], [[], ["a", "b", "c"]]]) + def test_empty(self, x, y): + # product of empty factors + expected1 = np.array([], dtype=np.asarray(x).dtype) + expected2 = np.array([], dtype=np.asarray(y).dtype) + result1, result2 = cartesian_product([x, y]) + tm.assert_numpy_array_equal(result1, expected1) + tm.assert_numpy_array_equal(result2, expected2) + + def test_empty_input(self): + # empty product (empty input): + result = cartesian_product([]) + expected = [] + assert result == expected + + @pytest.mark.parametrize( + "X", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]] + ) + def test_invalid_input(self, X): + msg = "Input must be a list-like of list-likes" + + with pytest.raises(TypeError, match=msg): + cartesian_product(X=X) + + def test_exceed_product_space(self): + # GH31355: raise useful error when product space is too large + msg = "Product space too large to allocate arrays!" + + with pytest.raises(ValueError, match=msg): + dims = [np.arange(0, 22, dtype=np.int16) for i in range(12)] + [ + (np.arange(15128, dtype=np.int16)), + ] + cartesian_product(X=dims)