diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_append.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_append.py new file mode 100644 index 0000000000000000000000000000000000000000..b48c3219f5111a7a1226d09ce4625c723c4168fb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_append.py @@ -0,0 +1,62 @@ +import pytest + +from pandas import ( + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +class TestAppend: + @pytest.fixture + def ci(self): + categories = list("cab") + return CategoricalIndex(list("aabbca"),
categories=categories, ordered=False) + + def test_append(self, ci): + # append cats with the same categories + result = ci[:3].append(ci[3:]) + tm.assert_index_equal(result, ci, exact=True) + + foos = [ci[:1], ci[1:3], ci[3:]] + result = foos[0].append(foos[1:]) + tm.assert_index_equal(result, ci, exact=True) + + def test_append_empty(self, ci): + # empty + result = ci.append([]) + tm.assert_index_equal(result, ci, exact=True) + + def test_append_mismatched_categories(self, ci): + # appending with different categories or reordered is not ok + msg = "all inputs must be Index" + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.set_categories(list("abcd"))) + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.reorder_categories(list("abc"))) + + def test_append_category_objects(self, ci): + # with objects + result = ci.append(Index(["c", "a"])) + expected = CategoricalIndex(list("aabbcaca"), categories=ci.categories) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_non_categories(self, ci): + # invalid objects -> cast to object via concat_compat + result = ci.append(Index(["a", "d"])) + expected = Index(["a", "a", "b", "b", "c", "a", "a", "d"]) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_object(self, ci): + # GH#14298 - if base object is not categorical -> coerce to object + result = Index(["c", "a"]).append(ci) + expected = Index(list("caaabbca")) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_to_another(self): + # hits Index._concat + fst = Index(["a", "b"]) + snd = CategoricalIndex(["d", "e"]) + result = fst.append(snd) + expected = Index(["a", "b", "d", "e"]) + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_astype.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..a17627b7515b26b1fcfdca0feec376f03a018e83 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_astype.py @@ -0,0 +1,90 @@ +from datetime import date + +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, + IntervalIndex, +) +import pandas._testing as tm + + +class TestAstype: + def test_astype(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + result = ci.astype(object) + tm.assert_index_equal(result, Index(np.array(ci), dtype=object)) + + # this IS equal, but not the same class + assert result.equals(ci) + assert isinstance(result, Index) + assert not isinstance(result, CategoricalIndex) + + # interval + ii = IntervalIndex.from_arrays(left=[-0.001, 2.0], right=[2, 4], closed="right") + + ci = CategoricalIndex( + Categorical.from_codes([0, 1, -1], categories=ii, ordered=True) + ) + + result = ci.astype("interval") + expected = ii.take([0, 1, -1], allow_fill=True, fill_value=np.nan) + tm.assert_index_equal(result, expected) + + result = IntervalIndex(result.values) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("name", [None, "foo"]) + @pytest.mark.parametrize("dtype_ordered", [True, False]) + @pytest.mark.parametrize("index_ordered", [True, False]) + def test_astype_category(self, name, dtype_ordered, index_ordered): + # GH#18630 + index = CategoricalIndex( + list("aabbca"), categories=list("cab"), ordered=index_ordered + ) + if name: + index = 
index.rename(name) + + # standard categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = index.astype(dtype) + expected = CategoricalIndex( + index.tolist(), + name=name, + categories=index.categories, + ordered=dtype_ordered, + ) + tm.assert_index_equal(result, expected) + + # non-standard categories + dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered) + result = index.astype(dtype) + expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype) + tm.assert_index_equal(result, expected) + + if dtype_ordered is False: + # dtype='category' can't specify ordered, so only test once + result = index.astype("category") + expected = index + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("box", [True, False]) + def test_categorical_date_roundtrip(self, box): + # astype to categorical and back should preserve date objects + v = date.today() + + obj = Index([v, v]) + assert obj.dtype == object + if box: + obj = obj.array + + cat = obj.astype("category") + + rtrip = cat.astype(object) + assert rtrip.dtype == object + assert type(rtrip[0]) is date diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_category.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_category.py new file mode 100644 index 0000000000000000000000000000000000000000..03a298a13dc2b45b3e78ec2d6390741709e42590 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_category.py @@ -0,0 +1,394 @@ +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import index as libindex +from pandas._libs.arrays import NDArrayBacked + +import pandas as pd +from pandas import ( + Categorical, + CategoricalDtype, +) +import pandas._testing as tm +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, +) + + +class TestCategoricalIndex: + @pytest.fixture + def simple_index(self) -> CategoricalIndex: + return CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + def test_can_hold_identifiers(self): + idx = CategoricalIndex(list("aabbca"), categories=None, ordered=False) + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is True + + def test_insert(self, simple_index): + ci = simple_index + categories = ci.categories + + # test 0th element + result = ci.insert(0, "a") + expected = CategoricalIndex(list("aaabbca"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # test Nth element that follows Python list behavior + result = ci.insert(-1, "a") + expected = CategoricalIndex(list("aabbcaa"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # test empty + result = CategoricalIndex([], categories=categories).insert(0, "a") + expected = CategoricalIndex(["a"], categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # invalid -> cast to object + expected = ci.astype(object).insert(0, "d") + result = ci.insert(0, "d").astype(object) + tm.assert_index_equal(result, expected, exact=True) + + # GH 18295 (test missing) + expected = CategoricalIndex(["a", np.nan, "a", "b", "c", "b"]) + for na in (np.nan, pd.NaT, None): + result = CategoricalIndex(list("aabcb")).insert(1, na) + tm.assert_index_equal(result, expected) + + def test_insert_na_mismatched_dtype(self): + ci = CategoricalIndex([0, 1, 1]) + result = ci.insert(0, pd.NaT) + expected = Index([pd.NaT, 0, 1, 1], dtype=object) + 
tm.assert_index_equal(result, expected) + + def test_delete(self, simple_index): + ci = simple_index + categories = ci.categories + + result = ci.delete(0) + expected = CategoricalIndex(list("abbca"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + result = ci.delete(-1) + expected = CategoricalIndex(list("aabbc"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + with tm.external_error_raised((IndexError, ValueError)): + # Either depending on NumPy version + ci.delete(10) + + @pytest.mark.parametrize( + "data, non_lexsorted_data", + [[[1, 2, 3], [9, 0, 1, 2, 3]], [list("abc"), list("fabcd")]], + ) + def test_is_monotonic(self, data, non_lexsorted_data): + c = CategoricalIndex(data) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(data, ordered=True) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(data, categories=reversed(data)) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is True + + c = CategoricalIndex(data, categories=reversed(data), ordered=True) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is True + + # test when data is neither monotonic increasing nor decreasing + reordered_data = [data[0], data[2], data[1]] + c = CategoricalIndex(reordered_data, categories=reversed(data)) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is False + + # non lexsorted categories + categories = non_lexsorted_data + + c = CategoricalIndex(categories[:2], categories=categories) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(categories[1:3], categories=categories) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + def test_has_duplicates(self): + idx = CategoricalIndex([0, 0, 0], name="foo") + assert idx.is_unique is False + assert idx.has_duplicates is True + + idx = CategoricalIndex([0, 1], categories=[2, 3], name="foo") + assert idx.is_unique is False + assert idx.has_duplicates is True + + idx = CategoricalIndex([0, 1, 2, 3], categories=[1, 2, 3], name="foo") + assert idx.is_unique is True + assert idx.has_duplicates is False + + @pytest.mark.parametrize( + "data, categories, expected", + [ + ( + [1, 1, 1], + [1, 2, 3], + { + "first": np.array([False, True, True]), + "last": np.array([True, True, False]), + False: np.array([True, True, True]), + }, + ), + ( + [1, 1, 1], + list("abc"), + { + "first": np.array([False, True, True]), + "last": np.array([True, True, False]), + False: np.array([True, True, True]), + }, + ), + ( + [2, "a", "b"], + list("abc"), + { + "first": np.zeros(shape=(3), dtype=np.bool_), + "last": np.zeros(shape=(3), dtype=np.bool_), + False: np.zeros(shape=(3), dtype=np.bool_), + }, + ), + ( + list("abb"), + list("abc"), + { + "first": np.array([False, False, True]), + "last": np.array([False, True, False]), + False: np.array([False, True, True]), + }, + ), + ], + ) + def test_drop_duplicates(self, data, categories, expected): + idx = CategoricalIndex(data, categories=categories, name="foo") + for keep, e in expected.items(): + tm.assert_numpy_array_equal(idx.duplicated(keep=keep), e) + e = idx[~e] + result = idx.drop_duplicates(keep=keep) + tm.assert_index_equal(result, e) + + @pytest.mark.parametrize( + "data, categories, expected_data", + [ + ([1, 1, 1], [1, 2, 3], [1]), + ([1, 1, 1], 
list("abc"), [np.nan]), + ([1, 2, "a"], [1, 2, 3], [1, 2, np.nan]), + ([2, "a", "b"], list("abc"), [np.nan, "a", "b"]), + ], + ) + def test_unique(self, data, categories, expected_data, ordered): + dtype = CategoricalDtype(categories, ordered=ordered) + + idx = CategoricalIndex(data, dtype=dtype) + expected = CategoricalIndex(expected_data, dtype=dtype) + tm.assert_index_equal(idx.unique(), expected) + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="repr doesn't roundtrip") + def test_repr_roundtrip(self): + ci = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + str(ci) + tm.assert_index_equal(eval(repr(ci)), ci, exact=True) + + # formatting + str(ci) + + # long format + # this is not reprable + ci = CategoricalIndex(np.random.default_rng(2).integers(0, 5, size=100)) + str(ci) + + def test_isin(self): + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + tm.assert_numpy_array_equal( + ci.isin(["c"]), np.array([False, False, False, True, False, False]) + ) + tm.assert_numpy_array_equal( + ci.isin(["c", "a", "b"]), np.array([True] * 5 + [False]) + ) + tm.assert_numpy_array_equal( + ci.isin(["c", "a", "b", np.nan]), np.array([True] * 6) + ) + + # mismatched categorical -> coerced to ndarray so doesn't matter + result = ci.isin(ci.set_categories(list("abcdefghi"))) + expected = np.array([True] * 6) + tm.assert_numpy_array_equal(result, expected) + + result = ci.isin(ci.set_categories(list("defghi"))) + expected = np.array([False] * 5 + [True]) + tm.assert_numpy_array_equal(result, expected) + + def test_isin_overlapping_intervals(self): + # GH 34974 + idx = pd.IntervalIndex([pd.Interval(0, 2), pd.Interval(0, 1)]) + result = CategoricalIndex(idx).isin(idx) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + def test_identical(self): + ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) + assert ci1.identical(ci1) + assert ci1.identical(ci1.copy()) + assert not ci1.identical(ci2) + + def test_ensure_copied_data(self): + # gh-12309: Check the "copy" argument of each + # Index.__new__ is honored. + # + # Must be tested separately from other indexes because + # self.values is not an ndarray. + index = CategoricalIndex(list("ab") * 5) + + result = CategoricalIndex(index.values, copy=True) + tm.assert_index_equal(index, result) + assert not np.shares_memory(result._data._codes, index._data._codes) + + result = CategoricalIndex(index.values, copy=False) + assert result._data._codes is index._data._codes + + +class TestCategoricalIndex2: + def test_view_i8(self): + # GH#25464 + ci = CategoricalIndex(list("ab") * 50) + msg = "When changing to a larger dtype, its size must be a divisor" + with pytest.raises(ValueError, match=msg): + ci.view("i8") + with pytest.raises(ValueError, match=msg): + ci._data.view("i8") + + ci = ci[:-4] # length divisible by 8 + + res = ci.view("i8") + expected = ci._data.codes.view("i8") + tm.assert_numpy_array_equal(res, expected) + + cat = ci._data + tm.assert_numpy_array_equal(cat.view("i8"), expected) + + @pytest.mark.parametrize( + "dtype, engine_type", + [ + (np.int8, libindex.Int8Engine), + (np.int16, libindex.Int16Engine), + (np.int32, libindex.Int32Engine), + (np.int64, libindex.Int64Engine), + ], + ) + def test_engine_type(self, dtype, engine_type): + if dtype != np.int64: + # num. 
of uniques required to push CategoricalIndex.codes to a + # dtype (128 categories required for .codes dtype to be int16 etc.) + num_uniques = {np.int8: 1, np.int16: 128, np.int32: 32768}[dtype] + ci = CategoricalIndex(range(num_uniques)) + else: + # having 2**32 - 2**31 categories would be very memory-intensive, + # so we cheat a bit with the dtype + ci = CategoricalIndex(range(32768)) # == 2**16 - 2**(16 - 1) + arr = ci.values._ndarray.astype("int64") + NDArrayBacked.__init__(ci._data, arr, ci.dtype) + assert np.issubdtype(ci.codes.dtype, dtype) + assert isinstance(ci._engine, engine_type) + + @pytest.mark.parametrize( + "func,op_name", + [ + (lambda idx: idx - idx, "__sub__"), + (lambda idx: idx + idx, "__add__"), + (lambda idx: idx - ["a", "b"], "__sub__"), + (lambda idx: idx + ["a", "b"], "__add__"), + (lambda idx: ["a", "b"] - idx, "__rsub__"), + (lambda idx: ["a", "b"] + idx, "__radd__"), + ], + ) + def test_disallow_addsub_ops(self, func, op_name): + # GH 10039 + # set ops (+/-) raise TypeError + idx = Index(Categorical(["a", "b"])) + cat_or_list = "'(Categorical|list)' and '(Categorical|list)'" + msg = "|".join( + [ + f"cannot perform {op_name} with this index type: CategoricalIndex", + "can only concatenate list", + rf"unsupported operand type\(s\) for [\+-]: {cat_or_list}", + ] + ) + with pytest.raises(TypeError, match=msg): + func(idx) + + def test_method_delegation(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.set_categories(list("cab")) + tm.assert_index_equal( + result, CategoricalIndex(list("aabbca"), categories=list("cab")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.rename_categories(list("efg")) + tm.assert_index_equal( + result, CategoricalIndex(list("ffggef"), categories=list("efg")) + ) + + # GH18862 (let rename_categories take callables) + result = ci.rename_categories(lambda x: x.upper()) + tm.assert_index_equal( + result, CategoricalIndex(list("AABBCA"), categories=list("CAB")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.add_categories(["d"]) + tm.assert_index_equal( + result, CategoricalIndex(list("aabbca"), categories=list("cabd")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.remove_categories(["c"]) + tm.assert_index_equal( + result, + CategoricalIndex(list("aabb") + [np.nan] + ["a"], categories=list("ab")), + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.as_unordered() + tm.assert_index_equal(result, ci) + + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.as_ordered() + tm.assert_index_equal( + result, + CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=True), + ) + + # invalid + msg = "cannot use inplace with CategoricalIndex" + with pytest.raises(ValueError, match=msg): + ci.set_categories(list("cab"), inplace=True) + + def test_remove_maintains_order(self): + ci = CategoricalIndex(list("abcdda"), categories=list("abcd")) + result = ci.reorder_categories(["d", "c", "b", "a"], ordered=True) + tm.assert_index_equal( + result, + CategoricalIndex(list("abcdda"), categories=list("dcba"), ordered=True), + ) + result = result.remove_categories(["c"]) + tm.assert_index_equal( + result, + CategoricalIndex( + ["a", "b", np.nan, "d", "d", "a"], categories=list("dba"), ordered=True + ), + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_constructors.py 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..f0c5307fc5c641ff25d26bd2bd8a158b43dd6a6d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_constructors.py @@ -0,0 +1,142 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +class TestCategoricalIndexConstructors: + def test_construction_disallows_scalar(self): + msg = "must be called with a collection of some kind" + with pytest.raises(TypeError, match=msg): + CategoricalIndex(data=1, categories=list("abcd"), ordered=False) + with pytest.raises(TypeError, match=msg): + CategoricalIndex(categories=list("abcd"), ordered=False) + + def test_construction(self): + ci = CategoricalIndex(list("aabbca"), categories=list("abcd"), ordered=False) + categories = ci.categories + + result = Index(ci) + tm.assert_index_equal(result, ci, exact=True) + assert not result.ordered + + result = Index(ci.values) + tm.assert_index_equal(result, ci, exact=True) + assert not result.ordered + + # empty + result = CategoricalIndex([], categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal(result.codes, np.array([], dtype="int8")) + assert not result.ordered + + # passing categories + result = CategoricalIndex(list("aabbca"), categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + + c = Categorical(list("aabbca")) + result = CategoricalIndex(c) + tm.assert_index_equal(result.categories, Index(list("abc"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(c, categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + ci = CategoricalIndex(c, categories=list("abcd")) + result = CategoricalIndex(ci) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(ci, categories=list("ab")) + tm.assert_index_equal(result.categories, Index(list("ab"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(ci, categories=list("ab"), ordered=True) + tm.assert_index_equal(result.categories, Index(list("ab"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8") + ) + assert result.ordered + + result = CategoricalIndex(ci, categories=list("ab"), ordered=True) + expected = CategoricalIndex( + ci, categories=list("ab"), ordered=True, dtype="category" + ) + tm.assert_index_equal(result, expected, exact=True) + + # turn me to an Index + result = Index(np.array(ci)) + assert isinstance(result, Index) + assert not isinstance(result, CategoricalIndex) + + def test_construction_with_dtype(self): + # specify dtype + ci = CategoricalIndex(list("aabbca"), categories=list("abc"), ordered=False) + + result = Index(np.array(ci), dtype="category") + tm.assert_index_equal(result, ci, exact=True) + + result = 
Index(np.array(ci).tolist(), dtype="category") + tm.assert_index_equal(result, ci, exact=True) + + # these are generally only equal when the categories are reordered + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + result = Index(np.array(ci), dtype="category").reorder_categories(ci.categories) + tm.assert_index_equal(result, ci, exact=True) + + # make sure indexes are handled + idx = Index(range(3)) + expected = CategoricalIndex([0, 1, 2], categories=idx, ordered=True) + result = CategoricalIndex(idx, categories=idx, ordered=True) + tm.assert_index_equal(result, expected, exact=True) + + def test_construction_empty_with_bool_categories(self): + # see GH#22702 + cat = CategoricalIndex([], categories=[True, False]) + categories = sorted(cat.categories.tolist()) + assert categories == [False, True] + + def test_construction_with_categorical_dtype(self): + # construction with CategoricalDtype + # GH#18109 + data, cats, ordered = "a a b b".split(), "c b a".split(), True + dtype = CategoricalDtype(categories=cats, ordered=ordered) + + result = CategoricalIndex(data, dtype=dtype) + expected = CategoricalIndex(data, categories=cats, ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # GH#19032 + result = Index(data, dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + # error when combining categories/ordered and dtype kwargs + msg = "Cannot specify `categories` or `ordered` together with `dtype`." + with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, categories=cats, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, ordered=ordered, dtype=dtype) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_equals.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_equals.py new file mode 100644 index 0000000000000000000000000000000000000000..a8353f301a3c39a50b2a0c5541722551ff660e30 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_equals.py @@ -0,0 +1,96 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + MultiIndex, +) + + +class TestEquals: + def test_equals_categorical(self): + ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) + + assert ci1.equals(ci1) + assert not ci1.equals(ci2) + assert ci1.equals(ci1.astype(object)) + assert ci1.astype(object).equals(ci1) + + assert (ci1 == ci1).all() + assert not (ci1 != ci1).all() + assert not (ci1 > ci1).all() + assert not (ci1 < ci1).all() + assert (ci1 <= ci1).all() + assert (ci1 >= ci1).all() + + assert not (ci1 == 1).all() + assert (ci1 == Index(["a", "b"])).all() + assert (ci1 == ci1.values).all() + + # invalid comparisons + with pytest.raises(ValueError, match="Lengths must match"): + ci1 == Index(["a", "b", "c"]) + + msg = "Categoricals can only be compared if 'categories' are the same" + with pytest.raises(TypeError, match=msg): + ci1 == ci2 + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, ordered=False) + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, categories=list("abc")) + + # tests + # make sure that we are testing for category inclusion properly + ci = CategoricalIndex(list("aabca"), categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + # Same categories, but different order + # 
Unordered + assert ci.equals(CategoricalIndex(list("aabca"))) + # Ordered + assert not ci.equals(CategoricalIndex(list("aabca"), ordered=True)) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + assert not ci.equals(CategoricalIndex(list("aabca"))) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca") + [np.nan]) + assert ci.equals(CategoricalIndex(list("aabca") + [np.nan])) + assert not ci.equals(CategoricalIndex(list("aabca") + [np.nan], ordered=True)) + assert ci.equals(ci.copy()) + + def test_equals_categorical_unordered(self): + # https://github.com/pandas-dev/pandas/issues/16603 + a = CategoricalIndex(["A"], categories=["A", "B"]) + b = CategoricalIndex(["A"], categories=["B", "A"]) + c = CategoricalIndex(["C"], categories=["B", "A"]) + assert a.equals(b) + assert not a.equals(c) + assert not b.equals(c) + + def test_equals_non_category(self): + # GH#37667 Case where other contains a value not among ci's + # categories ("D") and also contains np.nan + ci = CategoricalIndex(["A", "B", np.nan, np.nan]) + other = Index(["A", "B", "D", np.nan]) + + assert not ci.equals(other) + + def test_equals_multiindex(self): + # dont raise NotImplementedError when calling is_dtype_compat + + mi = MultiIndex.from_arrays([["A", "B", "C", "D"], range(4)]) + ci = mi.to_flat_index().astype("category") + + assert not ci.equals(mi) + + def test_equals_string_dtype(self, any_string_dtype): + # GH#55364 + idx = CategoricalIndex(list("abc"), name="B") + other = Index(["a", "b", "c"], name="B", dtype=any_string_dtype) + assert idx.equals(other) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_fillna.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_fillna.py new file mode 100644 index 0000000000000000000000000000000000000000..09de578f3c649e5a90278f11b1e3cd5b1d0646d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_fillna.py @@ -0,0 +1,54 @@ +import numpy as np +import pytest + +from pandas import CategoricalIndex +import pandas._testing as tm + + +class TestFillNA: + def test_fillna_categorical(self): + # GH#11343 + idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name="x") + # fill by value in categories + exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name="x") + tm.assert_index_equal(idx.fillna(1.0), exp) + + cat = idx._data + + # fill by value not in categories raises TypeError on EA, casts on CI + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + cat.fillna(2.0) + + result = idx.fillna(2.0) + expected = idx.astype(object).fillna(2.0) + tm.assert_index_equal(result, expected) + + def test_fillna_copies_with_no_nas(self): + # Nothing to fill, should still get a copy for the Categorical method, + # but OK to get a view on CategoricalIndex method + ci = CategoricalIndex([0, 1, 1]) + result = ci.fillna(0) + assert result is not ci + assert tm.shares_memory(result, ci) + + # But at the EA level we always get a copy. 
+ cat = ci._data + result = cat.fillna(0) + assert result._ndarray is not cat._ndarray + assert result._ndarray.base is None + assert not tm.shares_memory(result, cat) + + def test_fillna_validates_with_no_nas(self): + # We validate the fill value even if fillna is a no-op + ci = CategoricalIndex([2, 3, 3]) + cat = ci._data + + msg = "Cannot setitem on a Categorical with a new category" + res = ci.fillna(False) + # nothing to fill, so we dont cast + tm.assert_index_equal(res, ci) + + # Same check directly on the Categorical + with pytest.raises(TypeError, match=msg): + cat.fillna(False) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_formats.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..522ca1bc2afde451e2d6feff780a973aceb4c39f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_formats.py @@ -0,0 +1,120 @@ +""" +Tests for CategoricalIndex.__repr__ and related methods. +""" +import pytest + +from pandas._config import using_pyarrow_string_dtype +import pandas._config.config as cf + +from pandas import CategoricalIndex +import pandas._testing as tm + + +class TestCategoricalIndexRepr: + def test_format_different_scalar_lengths(self): + # GH#35439 + idx = CategoricalIndex(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + msg = r"CategoricalIndex\.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert idx.format() == expected + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="repr different") + def test_string_categorical_index_repr(self): + # short + idx = CategoricalIndex(["a", "bb", "ccc"]) + expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["a", "bb", "ccc"] * 10) + expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', + 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["a", "bb", "ccc"] * 100) + expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + ... 
+ 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("abcdefghijklmmo")) + expected = """CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', + 'm', 'm', 'o'], + categories=['a', 'b', 'c', 'd', ..., 'k', 'l', 'm', 'o'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # short + idx = CategoricalIndex(["あ", "いい", "ううう"]) + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["あ", "いい", "ううう"] * 10) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["あ", "いい", "ううう"] * 100) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + ... + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) + expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', + 'す', 'せ', 'そ'], + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # Enable Unicode option ----------------------------------------- + with cf.option_context("display.unicode.east_asian_width", True): + # short + idx = CategoricalIndex(["あ", "いい", "ううう"]) + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["あ", "いい", "ううう"] * 10) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', + 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["あ", "いい", "ううう"] * 100) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', + ... 
+ 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', + 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) + expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', + 'さ', 'し', 'す', 'せ', 'そ'], + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_indexing.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..49eb79da616e7603b70ee3189e9004dd51fb33e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_indexing.py @@ -0,0 +1,420 @@ +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +import pandas as pd +from pandas import ( + CategoricalIndex, + Index, + IntervalIndex, + Timestamp, +) +import pandas._testing as tm + + +class TestTake: + def test_take_fill_value(self): + # GH 12631 + + # numeric category + idx = CategoricalIndex([1, 2, 3], name="xxx") + result = idx.take(np.array([1, 0, -1])) + expected = CategoricalIndex([2, 1, 3], name="xxx") + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name="xxx") + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = CategoricalIndex([2, 1, 3], name="xxx") + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # object category + idx = CategoricalIndex( + list("CBA"), categories=list("ABC"), ordered=True, name="xxx" + ) + result = idx.take(np.array([1, 0, -1])) + expected = CategoricalIndex( + list("BCA"), categories=list("ABC"), ordered=True, name="xxx" + ) + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = CategoricalIndex( + ["B", "C", np.nan], categories=list("ABC"), ordered=True, name="xxx" + ) + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = CategoricalIndex( + list("BCA"), categories=list("ABC"), ordered=True, name="xxx" + ) + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + def test_take_fill_value_datetime(self): + # datetime category + idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], 
name="xxx") + idx = CategoricalIndex(idx) + result = idx.take(np.array([1, 0, -1])) + expected = pd.DatetimeIndex( + ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx" + ) + expected = CategoricalIndex(expected) + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx") + exp_cats = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"]) + expected = CategoricalIndex(expected, categories=exp_cats) + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = pd.DatetimeIndex( + ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx" + ) + expected = CategoricalIndex(expected) + tm.assert_index_equal(result, expected) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + def test_take_invalid_kwargs(self): + idx = CategoricalIndex([1, 2, 3], name="foo") + indices = [1, 0, -1] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + idx.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, mode="clip") + + +class TestGetLoc: + def test_get_loc(self): + # GH 12531 + cidx1 = CategoricalIndex(list("abcde"), categories=list("edabc")) + idx1 = Index(list("abcde")) + assert cidx1.get_loc("a") == idx1.get_loc("a") + assert cidx1.get_loc("e") == idx1.get_loc("e") + + for i in [cidx1, idx1]: + with pytest.raises(KeyError, match="'NOT-EXIST'"): + i.get_loc("NOT-EXIST") + + # non-unique + cidx2 = CategoricalIndex(list("aacded"), categories=list("edabc")) + idx2 = Index(list("aacded")) + + # results in bool array + res = cidx2.get_loc("d") + tm.assert_numpy_array_equal(res, idx2.get_loc("d")) + tm.assert_numpy_array_equal( + res, np.array([False, False, False, True, False, True]) + ) + # unique element results in scalar + res = cidx2.get_loc("e") + assert res == idx2.get_loc("e") + assert res == 4 + + for i in [cidx2, idx2]: + with pytest.raises(KeyError, match="'NOT-EXIST'"): + i.get_loc("NOT-EXIST") + + # non-unique, sliceable + cidx3 = CategoricalIndex(list("aabbb"), categories=list("abc")) + idx3 = Index(list("aabbb")) + + # results in slice + res = cidx3.get_loc("a") + assert res == idx3.get_loc("a") + assert res == slice(0, 2, None) + + res = cidx3.get_loc("b") + assert res == idx3.get_loc("b") + assert res == slice(2, 5, None) + + for i in [cidx3, idx3]: + with pytest.raises(KeyError, match="'c'"): + i.get_loc("c") + + def test_get_loc_unique(self): + cidx = CategoricalIndex(list("abc")) + result = cidx.get_loc("b") + assert result == 1 + + def test_get_loc_monotonic_nonunique(self): + cidx = CategoricalIndex(list("abbc")) + result = cidx.get_loc("b") + expected = slice(1, 3, None) + assert result == expected + + def test_get_loc_nonmonotonic_nonunique(self): + cidx = CategoricalIndex(list("abcb")) + result = cidx.get_loc("b") + 
expected = np.array([False, True, False, True], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + def test_get_loc_nan(self): + # GH#41933 + ci = CategoricalIndex(["A", "B", np.nan]) + res = ci.get_loc(np.nan) + + assert res == 2 + + +class TestGetIndexer: + def test_get_indexer_base(self): + # Determined by cat ordering. + idx = CategoricalIndex(list("cab"), categories=list("cab")) + expected = np.arange(len(idx), dtype=np.intp) + + actual = idx.get_indexer(idx) + tm.assert_numpy_array_equal(expected, actual) + + with pytest.raises(ValueError, match="Invalid fill method"): + idx.get_indexer(idx, method="invalid") + + def test_get_indexer_requires_unique(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + oidx = Index(np.array(ci)) + + msg = "Reindexing only valid with uniquely valued Index objects" + + for n in [1, 2, 5, len(ci)]: + finder = oidx[np.random.default_rng(2).integers(0, len(ci), size=n)] + + with pytest.raises(InvalidIndexError, match=msg): + ci.get_indexer(finder) + + # see gh-17323 + # + # Even when indexer is equal to the + # members in the index, we should + # respect duplicates instead of taking + # the fast-track path. + for finder in [list("aabbca"), list("aababca")]: + with pytest.raises(InvalidIndexError, match=msg): + ci.get_indexer(finder) + + def test_get_indexer_non_unique(self): + idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc")) + idx2 = CategoricalIndex(list("abf")) + + for indexer in [idx2, list("abf"), Index(list("abf"))]: + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + idx1.get_indexer(indexer) + + r1, _ = idx1.get_indexer_non_unique(indexer) + expected = np.array([0, 1, 2, -1], dtype=np.intp) + tm.assert_almost_equal(r1, expected) + + def test_get_indexer_method(self): + idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc")) + idx2 = CategoricalIndex(list("abf")) + + msg = "method pad not yet implemented for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method="pad") + msg = "method backfill not yet implemented for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method="backfill") + + msg = "method nearest not yet implemented for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method="nearest") + + def test_get_indexer_array(self): + arr = np.array( + [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")], + dtype=object, + ) + cats = [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")] + ci = CategoricalIndex(cats, categories=cats, ordered=False, dtype="category") + result = ci.get_indexer(arr) + expected = np.array([0, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_same_categories_same_order(self): + ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) + + result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["a", "b"])) + expected = np.array([1, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19551 + ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) + + result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["b", "a"])) + expected = np.array([1, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def 
test_get_indexer_nans_in_index_and_target(self): + # GH 45361 + ci = CategoricalIndex([1, 2, np.nan, 3]) + other1 = [2, 3, 4, np.nan] + res1 = ci.get_indexer(other1) + expected1 = np.array([1, 3, -1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(res1, expected1) + other2 = [1, 4, 2, 3] + res2 = ci.get_indexer(other2) + expected2 = np.array([0, -1, 1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(res2, expected2) + + +class TestWhere: + def test_where(self, listlike_box): + klass = listlike_box + + i = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + cond = [True] * len(i) + expected = i + result = i.where(klass(cond)) + tm.assert_index_equal(result, expected) + + cond = [False] + [True] * (len(i) - 1) + expected = CategoricalIndex([np.nan] + i[1:].tolist(), categories=i.categories) + result = i.where(klass(cond)) + tm.assert_index_equal(result, expected) + + def test_where_non_categories(self): + ci = CategoricalIndex(["a", "b", "c", "d"]) + mask = np.array([True, False, True, False]) + + result = ci.where(mask, 2) + expected = Index(["a", 2, "c", 2], dtype=object) + tm.assert_index_equal(result, expected) + + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + # Test the Categorical method directly + ci._data._where(mask, 2) + + +class TestContains: + def test_contains(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=False) + + assert "a" in ci + assert "z" not in ci + assert "e" not in ci + assert np.nan not in ci + + # assert codes NOT in index + assert 0 not in ci + assert 1 not in ci + + def test_contains_nan(self): + ci = CategoricalIndex(list("aabbca") + [np.nan], categories=list("cabdef")) + assert np.nan in ci + + @pytest.mark.parametrize("unwrap", [True, False]) + def test_contains_na_dtype(self, unwrap): + dti = pd.date_range("2016-01-01", periods=100).insert(0, pd.NaT) + pi = dti.to_period("D") + tdi = dti - dti[-1] + ci = CategoricalIndex(dti) + + obj = ci + if unwrap: + obj = ci._data + + assert np.nan in obj + assert None in obj + assert pd.NaT in obj + assert np.datetime64("NaT") in obj + assert np.timedelta64("NaT") not in obj + + obj2 = CategoricalIndex(tdi) + if unwrap: + obj2 = obj2._data + + assert np.nan in obj2 + assert None in obj2 + assert pd.NaT in obj2 + assert np.datetime64("NaT") not in obj2 + assert np.timedelta64("NaT") in obj2 + + obj3 = CategoricalIndex(pi) + if unwrap: + obj3 = obj3._data + + assert np.nan in obj3 + assert None in obj3 + assert pd.NaT in obj3 + assert np.datetime64("NaT") not in obj3 + assert np.timedelta64("NaT") not in obj3 + + @pytest.mark.parametrize( + "item, expected", + [ + (pd.Interval(0, 1), True), + (1.5, True), + (pd.Interval(0.5, 1.5), False), + ("a", False), + (Timestamp(1), False), + (pd.Timedelta(1), False), + ], + ids=str, + ) + def test_contains_interval(self, item, expected): + # GH 23705 + ci = CategoricalIndex(IntervalIndex.from_breaks(range(3))) + result = item in ci + assert result is expected + + def test_contains_list(self): + # GH#21729 + idx = CategoricalIndex([1, 2, 3]) + + assert "a" not in idx + + with pytest.raises(TypeError, match="unhashable type"): + ["a"] in idx + + with pytest.raises(TypeError, match="unhashable type"): + ["a", "b"] in idx diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_map.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_map.py new file mode 100644 index 
0000000000000000000000000000000000000000..baf836594dfb5e03332b57522f39a679ee5b1e40 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_map.py @@ -0,0 +1,144 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalIndex, + Index, + Series, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "data, categories", + [ + (list("abcbca"), list("cab")), + (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)), + ], + ids=["string", "interval"], +) +def test_map_str(data, categories, ordered): + # GH 31202 - override base class since we want to maintain categorical/ordered + index = CategoricalIndex(data, categories=categories, ordered=ordered) + result = index.map(str) + expected = CategoricalIndex( + map(str, data), categories=map(str, categories), ordered=ordered + ) + tm.assert_index_equal(result, expected) + + +def test_map(): + ci = CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True) + result = ci.map(lambda x: x.lower()) + exp = CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True) + tm.assert_index_equal(result, exp) + + ci = CategoricalIndex( + list("ABABC"), categories=list("BAC"), ordered=False, name="XXX" + ) + result = ci.map(lambda x: x.lower()) + exp = CategoricalIndex( + list("ababc"), categories=list("bac"), ordered=False, name="XXX" + ) + tm.assert_index_equal(result, exp) + + # GH 12766: Return an index not an array + tm.assert_index_equal( + ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX") + ) + + # change categories dtype + ci = CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False) + + def f(x): + return {"A": 10, "B": 20, "C": 30}.get(x) + + result = ci.map(f) + exp = CategoricalIndex([10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False) + tm.assert_index_equal(result, exp) + + result = ci.map(Series([10, 20, 30], index=["A", "B", "C"])) + tm.assert_index_equal(result, exp) + + result = ci.map({"A": 10, "B": 20, "C": 30}) + tm.assert_index_equal(result, exp) + + +def test_map_with_categorical_series(): + # GH 12756 + a = Index([1, 2, 3, 4]) + b = Series(["even", "odd", "even", "odd"], dtype="category") + c = Series(["even", "odd", "even", "odd"]) + + exp = CategoricalIndex(["odd", "even", "odd", np.nan]) + tm.assert_index_equal(a.map(b), exp) + exp = Index(["odd", "even", "odd", np.nan]) + tm.assert_index_equal(a.map(c), exp) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, CategoricalIndex([False, False, np.nan])), + ([1, 2, np.nan], pd.isna, Index([False, False, np.nan])), + ([1, 1, np.nan], {1: False}, CategoricalIndex([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + Series([False, False]), + CategoricalIndex([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False, False, False]), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_ignore(data, f, expected): # GH 24241 + values = CategoricalIndex(data) + result = values.map(f, na_action="ignore") + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, Index([False, False, True])), + ([1, 2, np.nan], pd.isna, Index([False, False, True])), + ([1, 1, np.nan], {1: False}, CategoricalIndex([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + 
Series([False, False]), + CategoricalIndex([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False, False, False]), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_none(data, f, expected): # GH 24241 + values = CategoricalIndex(data) + result = values.map(f, na_action=None) + tm.assert_index_equal(result, expected) + + +def test_map_with_dict_or_series(): + orig_values = ["a", "B", 1, "a"] + new_values = ["one", 2, 3.0, "one"] + cur_index = CategoricalIndex(orig_values, name="XXX") + expected = CategoricalIndex(new_values, name="XXX", categories=[3.0, 2, "one"]) + + mapper = Series(new_values[:-1], index=orig_values[:-1]) + result = cur_index.map(mapper) + # Order of categories in result can be different + tm.assert_index_equal(result, expected) + + mapper = dict(zip(orig_values[:-1], new_values[:-1])) + result = cur_index.map(mapper) + # Order of categories in result can be different + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_reindex.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_reindex.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1f2b9fb159a6873c83e0a0a4e777913bb99fee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_reindex.py @@ -0,0 +1,78 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + Interval, +) +import pandas._testing as tm + + +class TestReindex: + def test_reindex_list_non_unique(self): + # GH#11586 + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) + + def test_reindex_categorical_non_unique(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(Categorical(["a", "c"])) + + def test_reindex_list_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) + + def test_reindex_categorical_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(Categorical(["a", "c"])) + + def test_reindex_duplicate_target(self): + # See GH25459 + cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"]) + res, indexer = cat.reindex(["a", "c", "c"]) + exp = Index(["a", "c", "c"]) + tm.assert_index_equal(res, exp, exact=True) + tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp)) + + res, indexer = cat.reindex( + CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"]) + ) + exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"]) + tm.assert_index_equal(res, exp, exact=True) + tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp)) + + def test_reindex_empty_index(self): + # See GH16770 + c = CategoricalIndex([]) + res, indexer = c.reindex(["a", "b"]) + tm.assert_index_equal(res, Index(["a", "b"]), exact=True) + tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp)) + + def test_reindex_categorical_added_category(self): + # GH 
42424 + ci = CategoricalIndex( + [Interval(0, 1, closed="right"), Interval(1, 2, closed="right")], + ordered=True, + ) + ci_add = CategoricalIndex( + [ + Interval(0, 1, closed="right"), + Interval(1, 2, closed="right"), + Interval(2, 3, closed="right"), + Interval(3, 4, closed="right"), + ], + ordered=True, + ) + result, _ = ci.reindex(ci_add) + expected = ci_add + tm.assert_index_equal(expected, result) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_setops.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_setops.py new file mode 100644 index 0000000000000000000000000000000000000000..2e87b90efd54c8fcc4dcab7ec538d461add370de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_setops.py @@ -0,0 +1,18 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("na_value", [None, np.nan]) +def test_difference_with_na(na_value): + # GH 57318 + ci = CategoricalIndex(["a", "b", "c", None]) + other = Index(["c", na_value]) + result = ci.difference(other) + expected = CategoricalIndex(["a", "b"], categories=["a", "b", "c"]) + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_drop_duplicates.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_drop_duplicates.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb054354d023388f0f9d4caac8b64c14a83628dd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_drop_duplicates.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_equals.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_equals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccd2cc416b47a53b5ca110cfc6d7bca5deee1820 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_equals.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_indexing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91d1a03875210d19ad09f183ed82ea7814e09d91 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_indexing.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_is_monotonic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_is_monotonic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d2380d47691a8ff6bdb1b04bec55aa8cbeee7e4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_is_monotonic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_nat.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_nat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75f47ba1390552e8467e0ff7460d465daee451d1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimelike_/__pycache__/test_nat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..492b2e846d403b641169b418c4945870bd97c4a6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_astype.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58592a13c468b55f475458bd15db6a366b3b55f0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_astype.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_delete.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_delete.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9b353a55d0f376f8a696d6c30b4a419dc4e0541 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_delete.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_factorize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_factorize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b905a6f4e68bf0e2a9404ee16811e55fcaca9e4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_factorize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_isocalendar.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_isocalendar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a04845d60ab5f91757b681020c88c2647fa80401 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_isocalendar.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_repeat.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_repeat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d57fc982a493bc82448777af42485e5d7bc2a5b8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_repeat.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_resolution.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_resolution.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2a86ae2d4bf085e44b20558f5e84f8af5c9b7d4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_resolution.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_shift.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_shift.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8505202ea7bb4bc4e10a2a15b4237ec90ca9680 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_shift.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_snap.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_snap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..116a0598f5eddcd019dee16a872f263dc4eb1022 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_snap.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_to_frame.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_to_frame.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e1405b59c7b68d59eac82e90228b8414c892a72 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_to_frame.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_to_julian_date.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_to_julian_date.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c01f7b766789e06891b9396ccc450dfd892c1cbf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_to_julian_date.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_to_pydatetime.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_to_pydatetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dcb71fe03f1855ee6df2b771ef33965538a528e Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_to_pydatetime.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_tz_convert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_tz_convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a6a5664733714f320d5e83a693898400c89ce63 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_tz_convert.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_tz_localize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_tz_localize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cbfa9ebd3f38a98e6a63e887228ba02b35b5c35 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_tz_localize.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_unique.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_unique.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3d550c379fdf91f9512c3d315ebaed584196425 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/__pycache__/test_unique.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_asof.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_asof.py new file mode 100644 index 0000000000000000000000000000000000000000..dc92f533087bc3226727fac1810269520e1c4d1f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_asof.py @@ -0,0 +1,30 @@ +from datetime import timedelta + +from pandas import ( + Index, + Timestamp, + date_range, + isna, +) + + +class TestAsOf: + def test_asof_partial(self): + index = date_range("2010-01-01", periods=2, freq="ME") + expected = Timestamp("2010-02-28") + result = index.asof("2010-02") + assert result == expected + assert not isinstance(result, Index) + + def test_asof(self): + index = date_range("2020-01-01", periods=10) + + dt = index[0] + assert index.asof(dt) == dt + assert isna(index.asof(dt - timedelta(1))) + + dt = index[-1] + assert index.asof(dt + timedelta(1)) == dt + + dt = index[0].to_pydatetime() + assert isinstance(index.asof(dt), Timestamp) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_astype.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..c0bc6601769b1dce195f0af7303e5015bcddf589 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_astype.py @@ -0,0 +1,335 @@ +from datetime import datetime + +import dateutil +import numpy as np +import pytest +import pytz + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, + NaT, + PeriodIndex, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestDatetimeIndex: 
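Editor's aside for readers skimming the astype tests that follow: a minimal, hedged sketch (not part of the diff) of the two conversions they exercise most, using arbitrary index values.

import numpy as np
import pandas as pd

idx = pd.DatetimeIndex(["2016-05-16", pd.NaT])
as_obj = idx.astype(object)   # Index of Timestamp / NaT objects
as_i8 = idx.astype(np.int64)  # epoch nanoseconds; NaT maps to the int64 minimum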
+ @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_dti_astype_asobject_around_dst_transition(self, tzstr): + # GH#1345 + + # dates around a dst transition + rng = date_range("2/13/2010", "5/6/2010", tz=tzstr) + + objs = rng.astype(object) + for i, x in enumerate(objs): + exval = rng[i] + assert x == exval + assert x.tzinfo == exval.tzinfo + + objs = rng.astype(object) + for i, x in enumerate(objs): + exval = rng[i] + assert x == exval + assert x.tzinfo == exval.tzinfo + + def test_astype(self): + # GH 13149, GH 13209 + idx = DatetimeIndex( + ["2016-05-16", "NaT", NaT, np.nan], dtype="M8[ns]", name="idx" + ) + + result = idx.astype(object) + expected = Index( + [Timestamp("2016-05-16")] + [NaT] * 3, dtype=object, name="idx" + ) + tm.assert_index_equal(result, expected) + + result = idx.astype(np.int64) + expected = Index( + [1463356800000000000] + [-9223372036854775808] * 3, + dtype=np.int64, + name="idx", + ) + tm.assert_index_equal(result, expected) + + def test_astype2(self): + rng = date_range("1/1/2000", periods=10, name="idx") + result = rng.astype("i8") + tm.assert_index_equal(result, Index(rng.asi8, name="idx")) + tm.assert_numpy_array_equal(result.values, rng.asi8) + + def test_astype_uint(self): + arr = date_range("2000", periods=2, name="idx") + + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype("uint64") + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype("uint32") + + def test_astype_with_tz(self): + # with tz + rng = date_range("1/1/2000", periods=10, tz="US/Eastern") + msg = "Cannot use .astype to convert from timezone-aware" + with pytest.raises(TypeError, match=msg): + # deprecated + rng.astype("datetime64[ns]") + with pytest.raises(TypeError, match=msg): + # check DatetimeArray while we're here deprecated + rng._data.astype("datetime64[ns]") + + def test_astype_tzaware_to_tzaware(self): + # GH 18951: tz-aware to tz-aware + idx = date_range("20170101", periods=4, tz="US/Pacific") + result = idx.astype("datetime64[ns, US/Eastern]") + expected = date_range("20170101 03:00:00", periods=4, tz="US/Eastern") + tm.assert_index_equal(result, expected) + assert result.freq == expected.freq + + def test_astype_tznaive_to_tzaware(self): + # GH 18951: tz-naive to tz-aware + idx = date_range("20170101", periods=4) + idx = idx._with_freq(None) # tz_localize does not preserve freq + msg = "Cannot use .astype to convert from timezone-naive" + with pytest.raises(TypeError, match=msg): + # dt64->dt64tz deprecated + idx.astype("datetime64[ns, US/Eastern]") + with pytest.raises(TypeError, match=msg): + # dt64->dt64tz deprecated + idx._data.astype("datetime64[ns, US/Eastern]") + + def test_astype_str_nat(self): + # GH 13149, GH 13209 + # verify that we are returning NaT as a string (and not unicode) + + idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan]) + result = idx.astype(str) + expected = Index(["2016-05-16", "NaT", "NaT", "NaT"], dtype=object) + tm.assert_index_equal(result, expected) + + def test_astype_str(self): + # test astype string - #10442 + dti = date_range("2012-01-01", periods=4, name="test_name") + result = dti.astype(str) + expected = Index( + ["2012-01-01", "2012-01-02", "2012-01-03", "2012-01-04"], + name="test_name", + dtype=object, + ) + tm.assert_index_equal(result, expected) + + def test_astype_str_tz_and_name(self): + # test astype string with tz and name + dti = date_range("2012-01-01", periods=3, name="test_name", tz="US/Eastern") + result = dti.astype(str) + expected = 
Index( + [ + "2012-01-01 00:00:00-05:00", + "2012-01-02 00:00:00-05:00", + "2012-01-03 00:00:00-05:00", + ], + name="test_name", + dtype=object, + ) + tm.assert_index_equal(result, expected) + + def test_astype_str_freq_and_name(self): + # test astype string with freqH and name + dti = date_range("1/1/2011", periods=3, freq="h", name="test_name") + result = dti.astype(str) + expected = Index( + ["2011-01-01 00:00:00", "2011-01-01 01:00:00", "2011-01-01 02:00:00"], + name="test_name", + dtype=object, + ) + tm.assert_index_equal(result, expected) + + def test_astype_str_freq_and_tz(self): + # test astype string with freqH and timezone + dti = date_range( + "3/6/2012 00:00", periods=2, freq="h", tz="Europe/London", name="test_name" + ) + result = dti.astype(str) + expected = Index( + ["2012-03-06 00:00:00+00:00", "2012-03-06 01:00:00+00:00"], + dtype=object, + name="test_name", + ) + tm.assert_index_equal(result, expected) + + def test_astype_datetime64(self): + # GH 13149, GH 13209 + idx = DatetimeIndex( + ["2016-05-16", "NaT", NaT, np.nan], dtype="M8[ns]", name="idx" + ) + + result = idx.astype("datetime64[ns]") + tm.assert_index_equal(result, idx) + assert result is not idx + + result = idx.astype("datetime64[ns]", copy=False) + tm.assert_index_equal(result, idx) + assert result is idx + + idx_tz = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan], tz="EST", name="idx") + msg = "Cannot use .astype to convert from timezone-aware" + with pytest.raises(TypeError, match=msg): + # dt64tz->dt64 deprecated + result = idx_tz.astype("datetime64[ns]") + + def test_astype_object(self): + rng = date_range("1/1/2000", periods=20) + + casted = rng.astype("O") + exp_values = list(rng) + + tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_)) + assert casted.tolist() == exp_values + + @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"]) + def test_astype_object_tz(self, tz): + idx = date_range(start="2013-01-01", periods=4, freq="ME", name="idx", tz=tz) + expected_list = [ + Timestamp("2013-01-31", tz=tz), + Timestamp("2013-02-28", tz=tz), + Timestamp("2013-03-31", tz=tz), + Timestamp("2013-04-30", tz=tz), + ] + expected = Index(expected_list, dtype=object, name="idx") + result = idx.astype(object) + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list + + def test_astype_object_with_nat(self): + idx = DatetimeIndex( + [datetime(2013, 1, 1), datetime(2013, 1, 2), NaT, datetime(2013, 1, 4)], + name="idx", + ) + expected_list = [ + Timestamp("2013-01-01"), + Timestamp("2013-01-02"), + NaT, + Timestamp("2013-01-04"), + ] + expected = Index(expected_list, dtype=object, name="idx") + result = idx.astype(object) + tm.assert_index_equal(result, expected) + assert idx.tolist() == expected_list + + @pytest.mark.parametrize( + "dtype", + [float, "timedelta64", "timedelta64[ns]", "datetime64", "datetime64[D]"], + ) + def test_astype_raises(self, dtype): + # GH 13149, GH 13209 + idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan]) + msg = "Cannot cast DatetimeIndex to dtype" + if dtype == "datetime64": + msg = "Casting to unit-less dtype 'datetime64' is not supported" + with pytest.raises(TypeError, match=msg): + idx.astype(dtype) + + def test_index_convert_to_datetime_array(self): + def _check_rng(rng): + converted = rng.to_pydatetime() + assert isinstance(converted, np.ndarray) + for x, stamp in zip(converted, rng): + assert isinstance(x, datetime) + assert x == stamp.to_pydatetime() + assert x.tzinfo == stamp.tzinfo + + rng = date_range("20090415", "20090519") + rng_eastern 
= date_range("20090415", "20090519", tz="US/Eastern") + rng_utc = date_range("20090415", "20090519", tz="utc") + + _check_rng(rng) + _check_rng(rng_eastern) + _check_rng(rng_utc) + + def test_index_convert_to_datetime_array_explicit_pytz(self): + def _check_rng(rng): + converted = rng.to_pydatetime() + assert isinstance(converted, np.ndarray) + for x, stamp in zip(converted, rng): + assert isinstance(x, datetime) + assert x == stamp.to_pydatetime() + assert x.tzinfo == stamp.tzinfo + + rng = date_range("20090415", "20090519") + rng_eastern = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern")) + rng_utc = date_range("20090415", "20090519", tz=pytz.utc) + + _check_rng(rng) + _check_rng(rng_eastern) + _check_rng(rng_utc) + + def test_index_convert_to_datetime_array_dateutil(self): + def _check_rng(rng): + converted = rng.to_pydatetime() + assert isinstance(converted, np.ndarray) + for x, stamp in zip(converted, rng): + assert isinstance(x, datetime) + assert x == stamp.to_pydatetime() + assert x.tzinfo == stamp.tzinfo + + rng = date_range("20090415", "20090519") + rng_eastern = date_range("20090415", "20090519", tz="dateutil/US/Eastern") + rng_utc = date_range("20090415", "20090519", tz=dateutil.tz.tzutc()) + + _check_rng(rng) + _check_rng(rng_eastern) + _check_rng(rng_utc) + + @pytest.mark.parametrize( + "tz, dtype", + [["US/Pacific", "datetime64[ns, US/Pacific]"], [None, "datetime64[ns]"]], + ) + def test_integer_index_astype_datetime(self, tz, dtype): + # GH 20997, 20964, 24559 + val = [Timestamp("2018-01-01", tz=tz).as_unit("ns")._value] + result = Index(val, name="idx").astype(dtype) + expected = DatetimeIndex(["2018-01-01"], tz=tz, name="idx").as_unit("ns") + tm.assert_index_equal(result, expected) + + def test_dti_astype_period(self): + idx = DatetimeIndex([NaT, "2011-01-01", "2011-02-01"], name="idx") + + res = idx.astype("period[M]") + exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="M", name="idx") + tm.assert_index_equal(res, exp) + + res = idx.astype("period[3M]") + exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="3M", name="idx") + tm.assert_index_equal(res, exp) + + +class TestAstype: + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_astype_category(self, tz): + obj = date_range("2000", periods=2, tz=tz, name="idx") + result = obj.astype("category") + dti = DatetimeIndex(["2000-01-01", "2000-01-02"], tz=tz).as_unit("ns") + expected = pd.CategoricalIndex( + dti, + name="idx", + ) + tm.assert_index_equal(result, expected) + + result = obj._data.astype("category") + expected = expected.values + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_astype_array_fallback(self, tz): + obj = date_range("2000", periods=2, tz=tz, name="idx") + result = obj.astype(bool) + expected = Index(np.array([True, True]), name="idx") + tm.assert_index_equal(result, expected) + + result = obj._data.astype(bool) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_delete.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_delete.py new file mode 100644 index 0000000000000000000000000000000000000000..2341499977f2247dc42c30470795378515f49dc8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_delete.py @@ -0,0 +1,141 @@ +import pytest + +from pandas import ( + DatetimeIndex, + Series, 
+ date_range, +) +import pandas._testing as tm + + +class TestDelete: + def test_delete(self, unit): + idx = date_range( + start="2000-01-01", periods=5, freq="ME", name="idx", unit=unit + ) + + # preserve freq + expected_0 = date_range( + start="2000-02-01", periods=4, freq="ME", name="idx", unit=unit + ) + expected_4 = date_range( + start="2000-01-01", periods=4, freq="ME", name="idx", unit=unit + ) + + # reset freq to None + expected_1 = DatetimeIndex( + ["2000-01-31", "2000-03-31", "2000-04-30", "2000-05-31"], + freq=None, + name="idx", + ).as_unit(unit) + + cases = { + 0: expected_0, + -5: expected_0, + -1: expected_4, + 4: expected_4, + 1: expected_1, + } + for n, expected in cases.items(): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + with pytest.raises((IndexError, ValueError), match="out of bounds"): + # either depending on numpy version + idx.delete(5) + + @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Pacific"]) + def test_delete2(self, tz): + idx = date_range( + start="2000-01-01 09:00", periods=10, freq="h", name="idx", tz=tz + ) + + expected = date_range( + start="2000-01-01 10:00", periods=9, freq="h", name="idx", tz=tz + ) + result = idx.delete(0) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freqstr == "h" + assert result.tz == expected.tz + + expected = date_range( + start="2000-01-01 09:00", periods=9, freq="h", name="idx", tz=tz + ) + result = idx.delete(-1) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freqstr == "h" + assert result.tz == expected.tz + + def test_delete_slice(self, unit): + idx = date_range( + start="2000-01-01", periods=10, freq="D", name="idx", unit=unit + ) + + # preserve freq + expected_0_2 = date_range( + start="2000-01-04", periods=7, freq="D", name="idx", unit=unit + ) + expected_7_9 = date_range( + start="2000-01-01", periods=7, freq="D", name="idx", unit=unit + ) + + # reset freq to None + expected_3_5 = DatetimeIndex( + [ + "2000-01-01", + "2000-01-02", + "2000-01-03", + "2000-01-07", + "2000-01-08", + "2000-01-09", + "2000-01-10", + ], + freq=None, + name="idx", + ).as_unit(unit) + + cases = { + (0, 1, 2): expected_0_2, + (7, 8, 9): expected_7_9, + (3, 4, 5): expected_3_5, + } + for n, expected in cases.items(): + result = idx.delete(n) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + result = idx.delete(slice(n[0], n[-1] + 1)) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + # TODO: belongs in Series.drop tests? 
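Editor's aside, a hedged sketch (not part of the diff) of the freq behaviour that the surrounding delete tests rely on: trimming either end of a regular DatetimeIndex keeps its freq, while removing an interior point resets it.

import pandas as pd

idx = pd.date_range("2000-01-01", periods=5, freq="D")
assert idx.delete(0).freq == idx.freq  # trimming an end keeps the daily freq
assert idx.delete(2).freq is None      # an interior gap resets freq to None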
+ @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Pacific"]) + def test_delete_slice2(self, tz, unit): + dti = date_range( + "2000-01-01 09:00", periods=10, freq="h", name="idx", tz=tz, unit=unit + ) + ts = Series( + 1, + index=dti, + ) + # preserve freq + result = ts.drop(ts.index[:5]).index + expected = dti[5:] + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz + + # reset freq to None + result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index + expected = dti[::2]._with_freq(None) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.py new file mode 100644 index 0000000000000000000000000000000000000000..ebfe490e0e067807f7a38d3f8f285aee76718fcf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_insert.py @@ -0,0 +1,265 @@ +from datetime import datetime + +import numpy as np +import pytest +import pytz + +from pandas import ( + NA, + DatetimeIndex, + Index, + NaT, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestInsert: + @pytest.mark.parametrize("null", [None, np.nan, np.datetime64("NaT"), NaT, NA]) + @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) + def test_insert_nat(self, tz, null): + # GH#16537, GH#18295 (test missing) + + idx = DatetimeIndex(["2017-01-01"], tz=tz) + expected = DatetimeIndex(["NaT", "2017-01-01"], tz=tz) + if tz is not None and isinstance(null, np.datetime64): + expected = Index([null, idx[0]], dtype=object) + + res = idx.insert(0, null) + tm.assert_index_equal(res, expected) + + @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) + def test_insert_invalid_na(self, tz): + idx = DatetimeIndex(["2017-01-01"], tz=tz) + + item = np.timedelta64("NaT") + result = idx.insert(0, item) + expected = Index([item] + list(idx), dtype=object) + tm.assert_index_equal(result, expected) + + def test_insert_empty_preserves_freq(self, tz_naive_fixture): + # GH#33573 + tz = tz_naive_fixture + dti = DatetimeIndex([], tz=tz, freq="D") + item = Timestamp("2017-04-05").tz_localize(tz) + + result = dti.insert(0, item) + assert result.freq == dti.freq + + # But not when we insert an item that doesn't conform to freq + dti = DatetimeIndex([], tz=tz, freq="W-THU") + result = dti.insert(0, item) + assert result.freq is None + + def test_insert(self, unit): + idx = DatetimeIndex( + ["2000-01-04", "2000-01-01", "2000-01-02"], name="idx" + ).as_unit(unit) + + result = idx.insert(2, datetime(2000, 1, 5)) + exp = DatetimeIndex( + ["2000-01-04", "2000-01-01", "2000-01-05", "2000-01-02"], name="idx" + ).as_unit(unit) + tm.assert_index_equal(result, exp) + + # insertion of non-datetime should coerce to object index + result = idx.insert(1, "inserted") + expected = Index( + [ + datetime(2000, 1, 4), + "inserted", + datetime(2000, 1, 1), + datetime(2000, 1, 2), + ], + name="idx", + ) + assert not isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + + def test_insert2(self, unit): + idx = date_range("1/1/2000", periods=3, freq="ME", name="idx", unit=unit) + + # preserve freq + expected_0 = DatetimeIndex( + ["1999-12-31", "2000-01-31", "2000-02-29", 
"2000-03-31"], + name="idx", + freq="ME", + ).as_unit(unit) + expected_3 = DatetimeIndex( + ["2000-01-31", "2000-02-29", "2000-03-31", "2000-04-30"], + name="idx", + freq="ME", + ).as_unit(unit) + + # reset freq to None + expected_1_nofreq = DatetimeIndex( + ["2000-01-31", "2000-01-31", "2000-02-29", "2000-03-31"], + name="idx", + freq=None, + ).as_unit(unit) + expected_3_nofreq = DatetimeIndex( + ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], + name="idx", + freq=None, + ).as_unit(unit) + + cases = [ + (0, datetime(1999, 12, 31), expected_0), + (-3, datetime(1999, 12, 31), expected_0), + (3, datetime(2000, 4, 30), expected_3), + (1, datetime(2000, 1, 31), expected_1_nofreq), + (3, datetime(2000, 1, 2), expected_3_nofreq), + ] + + for n, d, expected in cases: + result = idx.insert(n, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + + def test_insert3(self, unit): + idx = date_range("1/1/2000", periods=3, freq="ME", name="idx", unit=unit) + + # reset freq to None + result = idx.insert(3, datetime(2000, 1, 2)) + expected = DatetimeIndex( + ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], + name="idx", + freq=None, + ).as_unit(unit) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq is None + + def test_insert4(self, unit): + for tz in ["US/Pacific", "Asia/Singapore"]: + idx = date_range( + "1/1/2000 09:00", periods=6, freq="h", tz=tz, name="idx", unit=unit + ) + # preserve freq + expected = date_range( + "1/1/2000 09:00", periods=7, freq="h", tz=tz, name="idx", unit=unit + ) + for d in [ + Timestamp("2000-01-01 15:00", tz=tz), + pytz.timezone(tz).localize(datetime(2000, 1, 1, 15)), + ]: + result = idx.insert(6, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.freq == expected.freq + assert result.tz == expected.tz + + expected = DatetimeIndex( + [ + "2000-01-01 09:00", + "2000-01-01 10:00", + "2000-01-01 11:00", + "2000-01-01 12:00", + "2000-01-01 13:00", + "2000-01-01 14:00", + "2000-01-01 10:00", + ], + name="idx", + tz=tz, + freq=None, + ).as_unit(unit) + # reset freq to None + for d in [ + Timestamp("2000-01-01 10:00", tz=tz), + pytz.timezone(tz).localize(datetime(2000, 1, 1, 10)), + ]: + result = idx.insert(6, d) + tm.assert_index_equal(result, expected) + assert result.name == expected.name + assert result.tz == expected.tz + assert result.freq is None + + # TODO: also changes DataFrame.__setitem__ with expansion + def test_insert_mismatched_tzawareness(self): + # see GH#7299 + idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx") + + # mismatched tz-awareness + item = Timestamp("2000-01-04") + result = idx.insert(3, item) + expected = Index( + list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx" + ) + tm.assert_index_equal(result, expected) + + # mismatched tz-awareness + item = datetime(2000, 1, 4) + result = idx.insert(3, item) + expected = Index( + list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx" + ) + tm.assert_index_equal(result, expected) + + # TODO: also changes DataFrame.__setitem__ with expansion + def test_insert_mismatched_tz(self): + # see GH#7299 + # pre-2.0 with mismatched tzs we would cast to object + idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx") + + # mismatched tz -> cast to object (could reasonably cast to same tz or UTC) + item = Timestamp("2000-01-04", tz="US/Eastern") + result = 
idx.insert(3, item) + expected = Index( + list(idx[:3]) + [item.tz_convert(idx.tz)] + list(idx[3:]), + name="idx", + ) + assert expected.dtype == idx.dtype + tm.assert_index_equal(result, expected) + + item = datetime(2000, 1, 4, tzinfo=pytz.timezone("US/Eastern")) + result = idx.insert(3, item) + expected = Index( + list(idx[:3]) + [item.astimezone(idx.tzinfo)] + list(idx[3:]), + name="idx", + ) + assert expected.dtype == idx.dtype + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "item", [0, np.int64(0), np.float64(0), np.array(0), np.timedelta64(456)] + ) + def test_insert_mismatched_types_raises(self, tz_aware_fixture, item): + # GH#33703 dont cast these to dt64 + tz = tz_aware_fixture + dti = date_range("2019-11-04", periods=9, freq="-1D", name=9, tz=tz) + + result = dti.insert(1, item) + + if isinstance(item, np.ndarray): + assert item.item() == 0 + expected = Index([dti[0], 0] + list(dti[1:]), dtype=object, name=9) + else: + expected = Index([dti[0], item] + list(dti[1:]), dtype=object, name=9) + + tm.assert_index_equal(result, expected) + + def test_insert_castable_str(self, tz_aware_fixture): + # GH#33703 + tz = tz_aware_fixture + dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz) + + value = "2019-11-05" + result = dti.insert(0, value) + + ts = Timestamp(value).tz_localize(tz) + expected = DatetimeIndex([ts] + list(dti), dtype=dti.dtype, name=9) + tm.assert_index_equal(result, expected) + + def test_insert_non_castable_str(self, tz_aware_fixture): + # GH#33703 + tz = tz_aware_fixture + dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz) + + value = "foo" + result = dti.insert(0, value) + + expected = Index(["foo"] + list(dti), dtype=object, name=9) + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_normalize.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_normalize.py new file mode 100644 index 0000000000000000000000000000000000000000..74711f67e64465c5592e562fcc94202666d0ad67 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_normalize.py @@ -0,0 +1,95 @@ +from dateutil.tz import tzlocal +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DatetimeIndex, + NaT, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestNormalize: + def test_normalize(self): + rng = date_range("1/1/2000 9:30", periods=10, freq="D") + + result = rng.normalize() + expected = date_range("1/1/2000", periods=10, freq="D") + tm.assert_index_equal(result, expected) + + arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype( + "datetime64[ns]" + ) + rng_ns = DatetimeIndex(arr_ns) + rng_ns_normalized = rng_ns.normalize() + + arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype( + "datetime64[ns]" + ) + expected = DatetimeIndex(arr_ns) + tm.assert_index_equal(rng_ns_normalized, expected) + + assert result.is_normalized + assert not rng.is_normalized + + def test_normalize_nat(self): + dti = DatetimeIndex([NaT, Timestamp("2018-01-01 01:00:00")]) + result = dti.normalize() + expected = DatetimeIndex([NaT, Timestamp("2018-01-01")]) + tm.assert_index_equal(result, expected) + + def test_normalize_tz(self): + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern") + + result = rng.normalize() # does not preserve freq + expected = date_range("1/1/2000", 
periods=10, freq="D", tz="US/Eastern") + tm.assert_index_equal(result, expected._with_freq(None)) + + assert result.is_normalized + assert not rng.is_normalized + + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC") + + result = rng.normalize() + expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC") + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized + + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) + result = rng.normalize() # does not preserve freq + expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) + tm.assert_index_equal(result, expected._with_freq(None)) + + assert result.is_normalized + assert not rng.is_normalized + + @td.skip_if_windows + @pytest.mark.parametrize( + "timezone", + [ + "US/Pacific", + "US/Eastern", + "UTC", + "Asia/Kolkata", + "Asia/Shanghai", + "Australia/Canberra", + ], + ) + def test_normalize_tz_local(self, timezone): + # GH#13459 + with tm.set_timezone(timezone): + rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal()) + + result = rng.normalize() + expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal()) + expected = expected._with_freq(None) + tm.assert_index_equal(result, expected) + + assert result.is_normalized + assert not rng.is_normalized diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_to_period.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_to_period.py new file mode 100644 index 0000000000000000000000000000000000000000..de8d32f64cde26b2fa0a0720cbdacc56f6c2e983 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_to_period.py @@ -0,0 +1,225 @@ +import dateutil.tz +from dateutil.tz import tzlocal +import pytest +import pytz + +from pandas._libs.tslibs.ccalendar import MONTHS +from pandas._libs.tslibs.offsets import MonthEnd +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG + +from pandas import ( + DatetimeIndex, + Period, + PeriodIndex, + Timestamp, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestToPeriod: + def test_dti_to_period(self): + dti = date_range(start="1/1/2005", end="12/1/2005", freq="ME") + pi1 = dti.to_period() + pi2 = dti.to_period(freq="D") + pi3 = dti.to_period(freq="3D") + + assert pi1[0] == Period("Jan 2005", freq="M") + assert pi2[0] == Period("1/31/2005", freq="D") + assert pi3[0] == Period("1/31/2005", freq="3D") + + assert pi1[-1] == Period("Nov 2005", freq="M") + assert pi2[-1] == Period("11/30/2005", freq="D") + assert pi3[-1], Period("11/30/2005", freq="3D") + + tm.assert_index_equal(pi1, period_range("1/1/2005", "11/1/2005", freq="M")) + tm.assert_index_equal( + pi2, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("D") + ) + tm.assert_index_equal( + pi3, period_range("1/1/2005", "11/1/2005", freq="M").asfreq("3D") + ) + + @pytest.mark.parametrize("month", MONTHS) + def test_to_period_quarterly(self, month): + # make sure we can make the round trip + freq = f"Q-{month}" + rng = period_range("1989Q3", "1991Q3", freq=freq) + stamps = rng.to_timestamp() + result = stamps.to_period(freq) + tm.assert_index_equal(rng, result) + + @pytest.mark.parametrize("off", ["BQE", "QS", "BQS"]) + def test_to_period_quarterlyish(self, off): + rng = date_range("01-Jan-2012", periods=8, freq=off) + prng = rng.to_period() + assert prng.freq == "QE-DEC" + + @pytest.mark.parametrize("off", ["BYE", "YS", "BYS"]) + 
def test_to_period_annualish(self, off): + rng = date_range("01-Jan-2012", periods=8, freq=off) + prng = rng.to_period() + assert prng.freq == "YE-DEC" + + def test_to_period_monthish(self): + offsets = ["MS", "BME"] + for off in offsets: + rng = date_range("01-Jan-2012", periods=8, freq=off) + prng = rng.to_period() + assert prng.freqstr == "M" + + rng = date_range("01-Jan-2012", periods=8, freq="ME") + prng = rng.to_period() + assert prng.freqstr == "M" + + with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG): + date_range("01-Jan-2012", periods=8, freq="EOM") + + @pytest.mark.parametrize( + "freq_offset, freq_period", + [ + ("2ME", "2M"), + (MonthEnd(2), MonthEnd(2)), + ], + ) + def test_dti_to_period_2monthish(self, freq_offset, freq_period): + dti = date_range("2020-01-01", periods=3, freq=freq_offset) + pi = dti.to_period() + + tm.assert_index_equal(pi, period_range("2020-01", "2020-05", freq=freq_period)) + + @pytest.mark.parametrize( + "freq, freq_depr", + [ + ("2ME", "2M"), + ("2QE", "2Q"), + ("2QE-SEP", "2Q-SEP"), + ("1YE", "1Y"), + ("2YE-MAR", "2Y-MAR"), + ("1YE", "1A"), + ("2YE-MAR", "2A-MAR"), + ], + ) + def test_to_period_frequency_M_Q_Y_A_deprecated(self, freq, freq_depr): + # GH#9586 + msg = f"'{freq_depr[1:]}' is deprecated and will be removed " + f"in a future version, please use '{freq[1:]}' instead." + + rng = date_range("01-Jan-2012", periods=8, freq=freq) + prng = rng.to_period() + with tm.assert_produces_warning(FutureWarning, match=msg): + assert prng.freq == freq_depr + + def test_to_period_infer(self): + # https://github.com/pandas-dev/pandas/issues/33358 + rng = date_range( + start="2019-12-22 06:40:00+00:00", + end="2019-12-22 08:45:00+00:00", + freq="5min", + ) + + with tm.assert_produces_warning(UserWarning): + pi1 = rng.to_period("5min") + + with tm.assert_produces_warning(UserWarning): + pi2 = rng.to_period() + + tm.assert_index_equal(pi1, pi2) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_period_dt64_round_trip(self): + dti = date_range("1/1/2000", "1/7/2002", freq="B") + pi = dti.to_period() + tm.assert_index_equal(pi.to_timestamp(), dti) + + dti = date_range("1/1/2000", "1/7/2002", freq="B") + pi = dti.to_period(freq="h") + tm.assert_index_equal(pi.to_timestamp(), dti) + + def test_to_period_millisecond(self): + index = DatetimeIndex( + [ + Timestamp("2007-01-01 10:11:12.123456Z"), + Timestamp("2007-01-01 10:11:13.789123Z"), + ] + ) + + with tm.assert_produces_warning(UserWarning): + # warning that timezone info will be lost + period = index.to_period(freq="ms") + assert 2 == len(period) + assert period[0] == Period("2007-01-01 10:11:12.123Z", "ms") + assert period[1] == Period("2007-01-01 10:11:13.789Z", "ms") + + def test_to_period_microsecond(self): + index = DatetimeIndex( + [ + Timestamp("2007-01-01 10:11:12.123456Z"), + Timestamp("2007-01-01 10:11:13.789123Z"), + ] + ) + + with tm.assert_produces_warning(UserWarning): + # warning that timezone info will be lost + period = index.to_period(freq="us") + assert 2 == len(period) + assert period[0] == Period("2007-01-01 10:11:12.123456Z", "us") + assert period[1] == Period("2007-01-01 10:11:13.789123Z", "us") + + @pytest.mark.parametrize( + "tz", + ["US/Eastern", pytz.utc, tzlocal(), "dateutil/US/Eastern", dateutil.tz.tzutc()], + ) + def test_to_period_tz(self, tz): + ts = date_range("1/1/2000", "2/1/2000", tz=tz) + + with tm.assert_produces_warning(UserWarning): + # GH#21333 warning that timezone info will be lost + # filter warning about freq 
deprecation + + result = ts.to_period()[0] + expected = ts[0].to_period(ts.freq) + + assert result == expected + + expected = date_range("1/1/2000", "2/1/2000").to_period() + + with tm.assert_produces_warning(UserWarning): + # GH#21333 warning that timezone info will be lost + result = ts.to_period(ts.freq) + + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("tz", ["Etc/GMT-1", "Etc/GMT+1"]) + def test_to_period_tz_utc_offset_consistency(self, tz): + # GH#22905 + ts = date_range("1/1/2000", "2/1/2000", tz="Etc/GMT-1") + with tm.assert_produces_warning(UserWarning): + result = ts.to_period()[0] + expected = ts[0].to_period(ts.freq) + assert result == expected + + def test_to_period_nofreq(self): + idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"]) + msg = "You must pass a freq argument as current index has none." + with pytest.raises(ValueError, match=msg): + idx.to_period() + + idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="infer") + assert idx.freqstr == "D" + expected = PeriodIndex(["2000-01-01", "2000-01-02", "2000-01-03"], freq="D") + tm.assert_index_equal(idx.to_period(), expected) + + # GH#7606 + idx = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"]) + assert idx.freqstr is None + tm.assert_index_equal(idx.to_period(), expected) + + @pytest.mark.parametrize("freq", ["2BMS", "1SME-15"]) + def test_to_period_offsets_not_supported(self, freq): + # GH#56243 + msg = f"{freq[1:]} is not supported as period frequency" + ts = date_range("1/1/2012", periods=4, freq=freq) + with pytest.raises(ValueError, match=msg): + ts.to_period() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_to_series.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_to_series.py new file mode 100644 index 0000000000000000000000000000000000000000..0c397c8ab2cd310a2d4fdf59992ea4d123370ee0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_to_series.py @@ -0,0 +1,18 @@ +import numpy as np + +from pandas import ( + DatetimeIndex, + Series, +) +import pandas._testing as tm + + +class TestToSeries: + def test_to_series(self): + naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B") + idx = naive.tz_localize("US/Pacific") + + expected = Series(np.array(idx.tolist(), dtype="object"), name="B") + result = idx.to_series(index=[0, 1]) + assert expected.dtype == idx.dtype + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_unique.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_unique.py new file mode 100644 index 0000000000000000000000000000000000000000..3c419b23c749a16e66458b334b3aec34521c2241 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/methods/test_unique.py @@ -0,0 +1,77 @@ +from datetime import ( + datetime, + timedelta, +) + +from pandas import ( + DatetimeIndex, + NaT, + Timestamp, +) +import pandas._testing as tm + + +def test_unique(tz_naive_fixture): + idx = DatetimeIndex(["2017"] * 2, tz=tz_naive_fixture) + expected = idx[:1] + + result = idx.unique() + tm.assert_index_equal(result, expected) + # GH#21737 + # Ensure the underlying data is consistent + assert result[0] == expected[0] + + +def test_index_unique(rand_series_with_duplicate_datetimeindex): + dups = rand_series_with_duplicate_datetimeindex + index = dups.index + + 
uniques = index.unique() + expected = DatetimeIndex( + [ + datetime(2000, 1, 2), + datetime(2000, 1, 3), + datetime(2000, 1, 4), + datetime(2000, 1, 5), + ], + dtype=index.dtype, + ) + assert uniques.dtype == index.dtype # sanity + tm.assert_index_equal(uniques, expected) + assert index.nunique() == 4 + + # GH#2563 + assert isinstance(uniques, DatetimeIndex) + + dups_local = index.tz_localize("US/Eastern") + dups_local.name = "foo" + result = dups_local.unique() + expected = DatetimeIndex(expected, name="foo") + expected = expected.tz_localize("US/Eastern") + assert result.tz is not None + assert result.name == "foo" + tm.assert_index_equal(result, expected) + + +def test_index_unique2(): + # NaT, note this is excluded + arr = [1370745748 + t for t in range(20)] + [NaT._value] + idx = DatetimeIndex(arr * 3) + tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) + assert idx.nunique() == 20 + assert idx.nunique(dropna=False) == 21 + + +def test_index_unique3(): + arr = [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20) + ] + [NaT] + idx = DatetimeIndex(arr * 3) + tm.assert_index_equal(idx.unique(), DatetimeIndex(arr)) + assert idx.nunique() == 20 + assert idx.nunique(dropna=False) == 21 + + +def test_is_unique_monotonic(rand_series_with_duplicate_datetimeindex): + index = rand_series_with_duplicate_datetimeindex.index + assert not index.is_unique diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_partial_slicing.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_partial_slicing.py new file mode 100644 index 0000000000000000000000000000000000000000..8b493fc61cb5873532e2e8393007533ee6cb8e4f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -0,0 +1,466 @@ +""" test partial slicing on Series/Frame """ + +from datetime import datetime + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timedelta, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestSlicing: + def test_string_index_series_name_converted(self): + # GH#1644 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=date_range("1/1/2000", periods=10), + ) + + result = df.loc["1/3/2000"] + assert result.name == df.index[2] + + result = df.T["1/3/2000"] + assert result.name == df.index[2] + + def test_stringified_slice_with_tz(self): + # GH#2658 + start = "2013-01-07" + idx = date_range(start=start, freq="1d", periods=10, tz="US/Eastern") + df = DataFrame(np.arange(10), index=idx) + df["2013-01-14 23:44:34.437768-05:00":] # no exception here + + def test_return_type_doesnt_depend_on_monotonicity(self): + # GH#24892 we get Series back regardless of whether our DTI is monotonic + dti = date_range(start="2015-5-13 23:59:00", freq="min", periods=3) + ser = Series(range(3), index=dti) + + # non-monotonic index + ser2 = Series(range(3), index=[dti[1], dti[0], dti[2]]) + + # key with resolution strictly lower than "min" + key = "2015-5-14 00" + + # monotonic increasing index + result = ser.loc[key] + expected = ser.iloc[1:] + tm.assert_series_equal(result, expected) + + # monotonic decreasing index + result = ser.iloc[::-1].loc[key] + expected = ser.iloc[::-1][:-1] + tm.assert_series_equal(result, expected) + + # non-monotonic index + result2 = ser2.loc[key] + expected2 = ser2.iloc[::2] + tm.assert_series_equal(result2, expected2) + + def 
test_return_type_doesnt_depend_on_monotonicity_higher_reso(self): + # GH#24892 we get Series back regardless of whether our DTI is monotonic + dti = date_range(start="2015-5-13 23:59:00", freq="min", periods=3) + ser = Series(range(3), index=dti) + + # non-monotonic index + ser2 = Series(range(3), index=[dti[1], dti[0], dti[2]]) + + # key with resolution strictly *higher) than "min" + key = "2015-5-14 00:00:00" + + # monotonic increasing index + result = ser.loc[key] + assert result == 1 + + # monotonic decreasing index + result = ser.iloc[::-1].loc[key] + assert result == 1 + + # non-monotonic index + result2 = ser2.loc[key] + assert result2 == 0 + + def test_monotone_DTI_indexing_bug(self): + # GH 19362 + # Testing accessing the first element in a monotonic descending + # partial string indexing. + + df = DataFrame(list(range(5))) + date_list = [ + "2018-01-02", + "2017-02-10", + "2016-03-10", + "2015-03-15", + "2014-03-16", + ] + date_index = DatetimeIndex(date_list) + df["date"] = date_index + expected = DataFrame({0: list(range(5)), "date": date_index}) + tm.assert_frame_equal(df, expected) + + # We get a slice because df.index's resolution is hourly and we + # are slicing with a daily-resolution string. If both were daily, + # we would get a single item back + dti = date_range("20170101 01:00:00", periods=3) + df = DataFrame({"A": [1, 2, 3]}, index=dti[::-1]) + + expected = DataFrame({"A": 1}, index=dti[-1:][::-1]) + result = df.loc["2017-01-03"] + tm.assert_frame_equal(result, expected) + + result2 = df.iloc[::-1].loc["2017-01-03"] + expected2 = expected.iloc[::-1] + tm.assert_frame_equal(result2, expected2) + + def test_slice_year(self): + dti = date_range(freq="B", start=datetime(2005, 1, 1), periods=500) + + s = Series(np.arange(len(dti)), index=dti) + result = s["2005"] + expected = s[s.index.year == 2005] + tm.assert_series_equal(result, expected) + + df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti) + result = df.loc["2005"] + expected = df[df.index.year == 2005] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "partial_dtime", + [ + "2019", + "2019Q4", + "Dec 2019", + "2019-12-31", + "2019-12-31 23", + "2019-12-31 23:59", + ], + ) + def test_slice_end_of_period_resolution(self, partial_dtime): + # GH#31064 + dti = date_range("2019-12-31 23:59:55.999999999", periods=10, freq="s") + + ser = Series(range(10), index=dti) + result = ser[partial_dtime] + expected = ser.iloc[:5] + tm.assert_series_equal(result, expected) + + def test_slice_quarter(self): + dti = date_range(freq="D", start=datetime(2000, 6, 1), periods=500) + + s = Series(np.arange(len(dti)), index=dti) + assert len(s["2001Q1"]) == 90 + + df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti) + assert len(df.loc["1Q01"]) == 90 + + def test_slice_month(self): + dti = date_range(freq="D", start=datetime(2005, 1, 1), periods=500) + s = Series(np.arange(len(dti)), index=dti) + assert len(s["2005-11"]) == 30 + + df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti) + assert len(df.loc["2005-11"]) == 30 + + tm.assert_series_equal(s["2005-11"], s["11-2005"]) + + def test_partial_slice(self): + rng = date_range(freq="D", start=datetime(2005, 1, 1), periods=500) + s = Series(np.arange(len(rng)), index=rng) + + result = s["2005-05":"2006-02"] + expected = s["20050501":"20060228"] + tm.assert_series_equal(result, expected) + + result = s["2005-05":] + expected = s["20050501":] + tm.assert_series_equal(result, expected) + + result = 
s[:"2006-02"] + expected = s[:"20060228"] + tm.assert_series_equal(result, expected) + + result = s["2005-1-1"] + assert result == s.iloc[0] + + with pytest.raises(KeyError, match=r"^'2004-12-31'$"): + s["2004-12-31"] + + def test_partial_slice_daily(self): + rng = date_range(freq="h", start=datetime(2005, 1, 31), periods=500) + s = Series(np.arange(len(rng)), index=rng) + + result = s["2005-1-31"] + tm.assert_series_equal(result, s.iloc[:24]) + + with pytest.raises(KeyError, match=r"^'2004-12-31 00'$"): + s["2004-12-31 00"] + + def test_partial_slice_hourly(self): + rng = date_range(freq="min", start=datetime(2005, 1, 1, 20, 0, 0), periods=500) + s = Series(np.arange(len(rng)), index=rng) + + result = s["2005-1-1"] + tm.assert_series_equal(result, s.iloc[: 60 * 4]) + + result = s["2005-1-1 20"] + tm.assert_series_equal(result, s.iloc[:60]) + + assert s["2005-1-1 20:00"] == s.iloc[0] + with pytest.raises(KeyError, match=r"^'2004-12-31 00:15'$"): + s["2004-12-31 00:15"] + + def test_partial_slice_minutely(self): + rng = date_range(freq="s", start=datetime(2005, 1, 1, 23, 59, 0), periods=500) + s = Series(np.arange(len(rng)), index=rng) + + result = s["2005-1-1 23:59"] + tm.assert_series_equal(result, s.iloc[:60]) + + result = s["2005-1-1"] + tm.assert_series_equal(result, s.iloc[:60]) + + assert s[Timestamp("2005-1-1 23:59:00")] == s.iloc[0] + with pytest.raises(KeyError, match=r"^'2004-12-31 00:00:00'$"): + s["2004-12-31 00:00:00"] + + def test_partial_slice_second_precision(self): + rng = date_range( + start=datetime(2005, 1, 1, 0, 0, 59, microsecond=999990), + periods=20, + freq="us", + ) + s = Series(np.arange(20), rng) + + tm.assert_series_equal(s["2005-1-1 00:00"], s.iloc[:10]) + tm.assert_series_equal(s["2005-1-1 00:00:59"], s.iloc[:10]) + + tm.assert_series_equal(s["2005-1-1 00:01"], s.iloc[10:]) + tm.assert_series_equal(s["2005-1-1 00:01:00"], s.iloc[10:]) + + assert s[Timestamp("2005-1-1 00:00:59.999990")] == s.iloc[0] + with pytest.raises(KeyError, match="2005-1-1 00:00:00"): + s["2005-1-1 00:00:00"] + + def test_partial_slicing_dataframe(self): + # GH14856 + # Test various combinations of string slicing resolution vs. 
+ # index resolution + # - If string resolution is less precise than index resolution, + # string is considered a slice + # - If string resolution is equal to or more precise than index + # resolution, string is considered an exact match + formats = [ + "%Y", + "%Y-%m", + "%Y-%m-%d", + "%Y-%m-%d %H", + "%Y-%m-%d %H:%M", + "%Y-%m-%d %H:%M:%S", + ] + resolutions = ["year", "month", "day", "hour", "minute", "second"] + for rnum, resolution in enumerate(resolutions[2:], 2): + # we check only 'day', 'hour', 'minute' and 'second' + unit = Timedelta("1 " + resolution) + middate = datetime(2012, 1, 1, 0, 0, 0) + index = DatetimeIndex([middate - unit, middate, middate + unit]) + values = [1, 2, 3] + df = DataFrame({"a": values}, index, dtype=np.int64) + assert df.index.resolution == resolution + + # Timestamp with the same resolution as index + # Should be exact match for Series (return scalar) + # and raise KeyError for Frame + for timestamp, expected in zip(index, values): + ts_string = timestamp.strftime(formats[rnum]) + # make ts_string as precise as index + result = df["a"][ts_string] + assert isinstance(result, np.int64) + assert result == expected + msg = rf"^'{ts_string}'$" + with pytest.raises(KeyError, match=msg): + df[ts_string] + + # Timestamp with resolution less precise than index + for fmt in formats[:rnum]: + for element, theslice in [[0, slice(None, 1)], [1, slice(1, None)]]: + ts_string = index[element].strftime(fmt) + + # Series should return slice + result = df["a"][ts_string] + expected = df["a"][theslice] + tm.assert_series_equal(result, expected) + + # pre-2.0 df[ts_string] was overloaded to interpret this + # as slicing along index + with pytest.raises(KeyError, match=ts_string): + df[ts_string] + + # Timestamp with resolution more precise than index + # Compatible with existing key + # Should return scalar for Series + # and raise KeyError for Frame + for fmt in formats[rnum + 1 :]: + ts_string = index[1].strftime(fmt) + result = df["a"][ts_string] + assert isinstance(result, np.int64) + assert result == 2 + msg = rf"^'{ts_string}'$" + with pytest.raises(KeyError, match=msg): + df[ts_string] + + # Not compatible with existing key + # Should raise KeyError + for fmt, res in list(zip(formats, resolutions))[rnum + 1 :]: + ts = index[1] + Timedelta("1 " + res) + ts_string = ts.strftime(fmt) + msg = rf"^'{ts_string}'$" + with pytest.raises(KeyError, match=msg): + df["a"][ts_string] + with pytest.raises(KeyError, match=msg): + df[ts_string] + + def test_partial_slicing_with_multiindex(self): + # GH 4758 + # partial string indexing with a multi-index buggy + df = DataFrame( + { + "ACCOUNT": ["ACCT1", "ACCT1", "ACCT1", "ACCT2"], + "TICKER": ["ABC", "MNP", "XYZ", "XYZ"], + "val": [1, 2, 3, 4], + }, + index=date_range("2013-06-19 09:30:00", periods=4, freq="5min"), + ) + df_multi = df.set_index(["ACCOUNT", "TICKER"], append=True) + + expected = DataFrame( + [[1]], index=Index(["ABC"], name="TICKER"), columns=["val"] + ) + result = df_multi.loc[("2013-06-19 09:30:00", "ACCT1")] + tm.assert_frame_equal(result, expected) + + expected = df_multi.loc[ + (Timestamp("2013-06-19 09:30:00", tz=None), "ACCT1", "ABC") + ] + result = df_multi.loc[("2013-06-19 09:30:00", "ACCT1", "ABC")] + tm.assert_series_equal(result, expected) + + # partial string indexing on first level, scalar indexing on the other two + result = df_multi.loc[("2013-06-19", "ACCT1", "ABC")] + expected = df_multi.iloc[:1].droplevel([1, 2]) + tm.assert_frame_equal(result, expected) + + def 
test_partial_slicing_with_multiindex_series(self): + # GH 4294 + # partial slice on a series mi + ser = Series( + range(250), + index=MultiIndex.from_product( + [date_range("2000-1-1", periods=50), range(5)] + ), + ) + + s2 = ser[:-1].copy() + expected = s2["2000-1-4"] + result = s2[Timestamp("2000-1-4")] + tm.assert_series_equal(result, expected) + + result = ser[Timestamp("2000-1-4")] + expected = ser["2000-1-4"] + tm.assert_series_equal(result, expected) + + df2 = DataFrame(ser) + expected = df2.xs("2000-1-4") + result = df2.loc[Timestamp("2000-1-4")] + tm.assert_frame_equal(result, expected) + + def test_partial_slice_requires_monotonicity(self): + # Disallowed since 2.0 (GH 37819) + ser = Series(np.arange(10), date_range("2014-01-01", periods=10)) + + nonmonotonic = ser.iloc[[3, 5, 4]] + timestamp = Timestamp("2014-01-10") + with pytest.raises( + KeyError, match="Value based partial slicing on non-monotonic" + ): + nonmonotonic["2014-01-10":] + + with pytest.raises(KeyError, match=r"Timestamp\('2014-01-10 00:00:00'\)"): + nonmonotonic[timestamp:] + + with pytest.raises( + KeyError, match="Value based partial slicing on non-monotonic" + ): + nonmonotonic.loc["2014-01-10":] + + with pytest.raises(KeyError, match=r"Timestamp\('2014-01-10 00:00:00'\)"): + nonmonotonic.loc[timestamp:] + + def test_loc_datetime_length_one(self): + # GH16071 + df = DataFrame( + columns=["1"], + index=date_range("2016-10-01T00:00:00", "2016-10-01T23:59:59"), + ) + result = df.loc[datetime(2016, 10, 1) :] + tm.assert_frame_equal(result, df) + + result = df.loc["2016-10-01T00:00:00":] + tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize( + "start", + [ + "2018-12-02 21:50:00+00:00", + Timestamp("2018-12-02 21:50:00+00:00"), + Timestamp("2018-12-02 21:50:00+00:00").to_pydatetime(), + ], + ) + @pytest.mark.parametrize( + "end", + [ + "2018-12-02 21:52:00+00:00", + Timestamp("2018-12-02 21:52:00+00:00"), + Timestamp("2018-12-02 21:52:00+00:00").to_pydatetime(), + ], + ) + def test_getitem_with_datestring_with_UTC_offset(self, start, end): + # GH 24076 + idx = date_range( + start="2018-12-02 14:50:00-07:00", + end="2018-12-02 14:50:00-07:00", + freq="1min", + ) + df = DataFrame(1, index=idx, columns=["A"]) + result = df[start:end] + expected = df.iloc[0:3, :] + tm.assert_frame_equal(result, expected) + + # GH 16785 + start = str(start) + end = str(end) + with pytest.raises(ValueError, match="Both dates must"): + df[start : end[:-4] + "1:00"] + + with pytest.raises(ValueError, match="The index must be timezone"): + df = df.tz_localize(None) + df[start:end] + + def test_slice_reduce_to_series(self): + # GH 27516 + df = DataFrame( + {"A": range(24)}, index=date_range("2000", periods=24, freq="ME") + ) + expected = Series( + range(12), index=date_range("2000", periods=12, freq="ME"), name="A" + ) + result = df.loc["2000", "A"] + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/conftest.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..15062aee56e3a1b91d1f6eb76a4f86e381e0ad44 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/conftest.py @@ -0,0 +1,27 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + MultiIndex, +) + + +# Note: identical the "multi" entry in the top-level "index" fixture +@pytest.fixture +def idx(): + # a MultiIndex used to test the general functionality of 
the + # general functionality of this object + major_axis = Index(["foo", "bar", "baz", "qux"]) + minor_axis = Index(["one", "two"]) + + major_codes = np.array([0, 0, 1, 2, 3, 3]) + minor_codes = np.array([0, 1, 0, 1, 0, 1]) + index_names = ["first", "second"] + mi = MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=index_names, + verify_integrity=False, + ) + return mi diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_compat.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..27a8c6e9b715880a57e711e8eab457ae553a4867 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_compat.py @@ -0,0 +1,122 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import MultiIndex +import pandas._testing as tm + + +def test_numeric_compat(idx): + with pytest.raises(TypeError, match="cannot perform __mul__"): + idx * 1 + + with pytest.raises(TypeError, match="cannot perform __rmul__"): + 1 * idx + + div_err = "cannot perform __truediv__" + with pytest.raises(TypeError, match=div_err): + idx / 1 + + div_err = div_err.replace(" __", " __r") + with pytest.raises(TypeError, match=div_err): + 1 / idx + + with pytest.raises(TypeError, match="cannot perform __floordiv__"): + idx // 1 + + with pytest.raises(TypeError, match="cannot perform __rfloordiv__"): + 1 // idx + + +@pytest.mark.parametrize("method", ["all", "any", "__invert__"]) +def test_logical_compat(idx, method): + msg = f"cannot perform {method}" + + with pytest.raises(TypeError, match=msg): + getattr(idx, method)() + + +def test_inplace_mutation_resets_values(): + levels = [["a", "b", "c"], [4]] + levels2 = [[1, 2, 3], ["a"]] + codes = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]] + + mi1 = MultiIndex(levels=levels, codes=codes) + mi2 = MultiIndex(levels=levels2, codes=codes) + + # instantiating MultiIndex should not access/cache _.values + assert "_values" not in mi1._cache + assert "_values" not in mi2._cache + + vals = mi1.values.copy() + vals2 = mi2.values.copy() + + # accessing .values should cache ._values + assert mi1._values is mi1._cache["_values"] + assert mi1.values is mi1._cache["_values"] + assert isinstance(mi1._cache["_values"], np.ndarray) + + # Make sure level setting works + new_vals = mi1.set_levels(levels2).values + tm.assert_almost_equal(vals2, new_vals) + + # Doesn't drop _values from _cache [implementation detail] + tm.assert_almost_equal(mi1._cache["_values"], vals) + + # ...and values is still same too + tm.assert_almost_equal(mi1.values, vals) + + # Make sure label setting works too + codes2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]] + exp_values = np.empty((6,), dtype=object) + exp_values[:] = [(1, "a")] * 6 + + # Must be 1d array of tuples + assert exp_values.shape == (6,) + + new_mi = mi2.set_codes(codes2) + assert "_values" not in new_mi._cache + new_values = new_mi.values + assert "_values" in new_mi._cache + + # Shouldn't change cache + tm.assert_almost_equal(mi2._cache["_values"], vals2) + + # Should have correct values + tm.assert_almost_equal(exp_values, new_values) + + +def test_boxable_categorical_values(): + cat = pd.Categorical(pd.date_range("2012-01-01", periods=3, freq="h")) + result = MultiIndex.from_product([["a", "b", "c"], cat]).values + expected = pd.Series( + [ + ("a", pd.Timestamp("2012-01-01 00:00:00")), + ("a", pd.Timestamp("2012-01-01 01:00:00")), + ("a", 
pd.Timestamp("2012-01-01 02:00:00")), + ("b", pd.Timestamp("2012-01-01 00:00:00")), + ("b", pd.Timestamp("2012-01-01 01:00:00")), + ("b", pd.Timestamp("2012-01-01 02:00:00")), + ("c", pd.Timestamp("2012-01-01 00:00:00")), + ("c", pd.Timestamp("2012-01-01 01:00:00")), + ("c", pd.Timestamp("2012-01-01 02:00:00")), + ] + ).values + tm.assert_numpy_array_equal(result, expected) + result = pd.DataFrame({"a": ["a", "b", "c"], "b": cat, "c": np.array(cat)}).values + expected = pd.DataFrame( + { + "a": ["a", "b", "c"], + "b": [ + pd.Timestamp("2012-01-01 00:00:00"), + pd.Timestamp("2012-01-01 01:00:00"), + pd.Timestamp("2012-01-01 02:00:00"), + ], + "c": [ + pd.Timestamp("2012-01-01 00:00:00"), + pd.Timestamp("2012-01-01 01:00:00"), + pd.Timestamp("2012-01-01 02:00:00"), + ], + } + ).values + tm.assert_numpy_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_constructors.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..8456e6a7acba5a160556f6f3192fa795ce678786 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_constructors.py @@ -0,0 +1,860 @@ +from datetime import ( + date, + datetime, +) +import itertools + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike + +import pandas as pd +from pandas import ( + Index, + MultiIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +def test_constructor_single_level(): + result = MultiIndex( + levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"] + ) + assert isinstance(result, MultiIndex) + expected = Index(["foo", "bar", "baz", "qux"], name="first") + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["first"] + + +def test_constructor_no_levels(): + msg = "non-zero number of levels/codes" + with pytest.raises(ValueError, match=msg): + MultiIndex(levels=[], codes=[]) + + msg = "Must pass both levels and codes" + with pytest.raises(TypeError, match=msg): + MultiIndex(levels=[]) + with pytest.raises(TypeError, match=msg): + MultiIndex(codes=[]) + + +def test_constructor_nonhashable_names(): + # GH 20527 + levels = [[1, 2], ["one", "two"]] + codes = [[0, 0, 1, 1], [0, 1, 0, 1]] + names = (["foo"], ["bar"]) + msg = r"MultiIndex\.name must be a hashable type" + with pytest.raises(TypeError, match=msg): + MultiIndex(levels=levels, codes=codes, names=names) + + # With .rename() + mi = MultiIndex( + levels=[[1, 2], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=("foo", "bar"), + ) + renamed = [["fooo"], ["barr"]] + with pytest.raises(TypeError, match=msg): + mi.rename(names=renamed) + + # With .set_names() + with pytest.raises(TypeError, match=msg): + mi.set_names(names=renamed) + + +def test_constructor_mismatched_codes_levels(idx): + codes = [np.array([1]), np.array([2]), np.array([3])] + levels = ["a"] + + msg = "Length of levels and codes must be the same" + with pytest.raises(ValueError, match=msg): + MultiIndex(levels=levels, codes=codes) + + length_error = ( + r"On level 0, code max \(3\) >= length of level \(1\)\. " + "NOTE: this index is in an inconsistent state" + ) + label_error = r"Unequal code lengths: \[4, 2\]" + code_value_error = r"On level 0, code value \(-2\) < -1" + + # important to check that it's looking at the right thing. 
+ with pytest.raises(ValueError, match=length_error): + MultiIndex(levels=[["a"], ["b"]], codes=[[0, 1, 2, 3], [0, 3, 4, 1]]) + + with pytest.raises(ValueError, match=label_error): + MultiIndex(levels=[["a"], ["b"]], codes=[[0, 0, 0, 0], [0, 0]]) + + # external API + with pytest.raises(ValueError, match=length_error): + idx.copy().set_levels([["a"], ["b"]]) + + with pytest.raises(ValueError, match=label_error): + idx.copy().set_codes([[0, 0, 0, 0], [0, 0]]) + + # test set_codes with verify_integrity=False + # the setting should not raise any value error + idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]], verify_integrity=False) + + # code value smaller than -1 + with pytest.raises(ValueError, match=code_value_error): + MultiIndex(levels=[["a"], ["b"]], codes=[[0, -2], [0, 0]]) + + +def test_na_levels(): + # GH26408 + # test if codes are re-assigned value -1 for levels + # with missing values (NaN, NaT, None) + result = MultiIndex( + levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[0, -1, 1, 2, 3, 4]] + ) + expected = MultiIndex( + levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[-1, -1, -1, -1, 3, 4]] + ) + tm.assert_index_equal(result, expected) + + result = MultiIndex( + levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[0, -1, 1, 2, 3, 4]] + ) + expected = MultiIndex( + levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[-1, -1, 1, -1, 3, -1]] + ) + tm.assert_index_equal(result, expected) + + # verify set_levels and set_codes + result = MultiIndex( + levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]] + ).set_levels([[np.nan, "s", pd.NaT, 128, None]]) + tm.assert_index_equal(result, expected) + + result = MultiIndex( + levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[1, 2, 2, 2, 2, 2]] + ).set_codes([[0, -1, 1, 2, 3, 4]]) + tm.assert_index_equal(result, expected) + + +def test_copy_in_constructor(): + levels = np.array(["a", "b", "c"]) + codes = np.array([1, 1, 2, 0, 0, 1, 1]) + val = codes[0] + mi = MultiIndex(levels=[levels, levels], codes=[codes, codes], copy=True) + assert mi.codes[0][0] == val + codes[0] = 15 + assert mi.codes[0][0] == val + val = levels[0] + levels[0] = "PANDA" + assert mi.levels[0][0] == val + + +# ---------------------------------------------------------------------------- +# from_arrays +# ---------------------------------------------------------------------------- +def test_from_arrays(idx): + arrays = [ + np.asarray(lev).take(level_codes) + for lev, level_codes in zip(idx.levels, idx.codes) + ] + + # list of arrays as input + result = MultiIndex.from_arrays(arrays, names=idx.names) + tm.assert_index_equal(result, idx) + + # infer correctly + result = MultiIndex.from_arrays([[pd.NaT, Timestamp("20130101")], ["a", "b"]]) + assert result.levels[0].equals(Index([Timestamp("20130101")])) + assert result.levels[1].equals(Index(["a", "b"])) + + +def test_from_arrays_iterator(idx): + # GH 18434 + arrays = [ + np.asarray(lev).take(level_codes) + for lev, level_codes in zip(idx.levels, idx.codes) + ] + + # iterator as input + result = MultiIndex.from_arrays(iter(arrays), names=idx.names) + tm.assert_index_equal(result, idx) + + # invalid iterator input + msg = "Input must be a list / sequence of array-likes." 
+ with pytest.raises(TypeError, match=msg): + MultiIndex.from_arrays(0) + + +def test_from_arrays_tuples(idx): + arrays = tuple( + tuple(np.asarray(lev).take(level_codes)) + for lev, level_codes in zip(idx.levels, idx.codes) + ) + + # tuple of tuples as input + result = MultiIndex.from_arrays(arrays, names=idx.names) + tm.assert_index_equal(result, idx) + + +@pytest.mark.parametrize( + ("idx1", "idx2"), + [ + ( + pd.period_range("2011-01-01", freq="D", periods=3), + pd.period_range("2015-01-01", freq="h", periods=3), + ), + ( + date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"), + date_range("2015-01-01 10:00", freq="h", periods=3, tz="Asia/Tokyo"), + ), + ( + pd.timedelta_range("1 days", freq="D", periods=3), + pd.timedelta_range("2 hours", freq="h", periods=3), + ), + ], +) +def test_from_arrays_index_series_period_datetimetz_and_timedelta(idx1, idx2): + result = MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + tm.assert_index_equal(result, result2) + + +def test_from_arrays_index_datetimelike_mixed(): + idx1 = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern") + idx2 = date_range("2015-01-01 10:00", freq="h", periods=3) + idx3 = pd.timedelta_range("1 days", freq="D", periods=3) + idx4 = pd.period_range("2011-01-01", freq="D", periods=3) + + result = MultiIndex.from_arrays([idx1, idx2, idx3, idx4]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + tm.assert_index_equal(result.get_level_values(2), idx3) + tm.assert_index_equal(result.get_level_values(3), idx4) + + result2 = MultiIndex.from_arrays( + [Series(idx1), Series(idx2), Series(idx3), Series(idx4)] + ) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + tm.assert_index_equal(result2.get_level_values(2), idx3) + tm.assert_index_equal(result2.get_level_values(3), idx4) + + tm.assert_index_equal(result, result2) + + +def test_from_arrays_index_series_categorical(): + # GH13743 + idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=False) + idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=True) + + result = MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + result3 = MultiIndex.from_arrays([idx1.values, idx2.values]) + tm.assert_index_equal(result3.get_level_values(0), idx1) + tm.assert_index_equal(result3.get_level_values(1), idx2) + + +def test_from_arrays_empty(): + # 0 levels + msg = "Must pass non-zero number of levels/codes" + with pytest.raises(ValueError, match=msg): + MultiIndex.from_arrays(arrays=[]) + + # 1 level + result = MultiIndex.from_arrays(arrays=[[]], names=["A"]) + assert isinstance(result, MultiIndex) + expected = Index([], name="A") + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["A"] + + # N levels + for N in [2, 3]: + arrays = [[]] * N + names = list("ABC")[:N] + result = 
MultiIndex.from_arrays(arrays=arrays, names=names) + expected = MultiIndex(levels=[[]] * N, codes=[[]] * N, names=names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "invalid_sequence_of_arrays", + [ + 1, + [1], + [1, 2], + [[1], 2], + [1, [2]], + "a", + ["a"], + ["a", "b"], + [["a"], "b"], + (1,), + (1, 2), + ([1], 2), + (1, [2]), + "a", + ("a",), + ("a", "b"), + (["a"], "b"), + [(1,), 2], + [1, (2,)], + [("a",), "b"], + ((1,), 2), + (1, (2,)), + (("a",), "b"), + ], +) +def test_from_arrays_invalid_input(invalid_sequence_of_arrays): + msg = "Input must be a list / sequence of array-likes" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays) + + +@pytest.mark.parametrize( + "idx1, idx2", [([1, 2, 3], ["a", "b"]), ([], ["a", "b"]), ([1, 2, 3], [])] +) +def test_from_arrays_different_lengths(idx1, idx2): + # see gh-13599 + msg = "^all arrays must be same length$" + with pytest.raises(ValueError, match=msg): + MultiIndex.from_arrays([idx1, idx2]) + + +def test_from_arrays_respects_none_names(): + # GH27292 + a = Series([1, 2, 3], name="foo") + b = Series(["a", "b", "c"], name="bar") + + result = MultiIndex.from_arrays([a, b], names=None) + expected = MultiIndex( + levels=[[1, 2, 3], ["a", "b", "c"]], codes=[[0, 1, 2], [0, 1, 2]], names=None + ) + + tm.assert_index_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# from_tuples +# ---------------------------------------------------------------------------- +def test_from_tuples(): + msg = "Cannot infer number of levels from empty list" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_tuples([]) + + expected = MultiIndex( + levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"] + ) + + # input tuples + result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=["a", "b"]) + tm.assert_index_equal(result, expected) + + +def test_from_tuples_iterator(): + # GH 18434 + # input iterator for tuples + expected = MultiIndex( + levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"] + ) + + result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=["a", "b"]) + tm.assert_index_equal(result, expected) + + # input non-iterables + msg = "Input must be a list / sequence of tuple-likes." 
+ with pytest.raises(TypeError, match=msg): + MultiIndex.from_tuples(0) + + +def test_from_tuples_empty(): + # GH 16777 + result = MultiIndex.from_tuples([], names=["a", "b"]) + expected = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"]) + tm.assert_index_equal(result, expected) + + +def test_from_tuples_index_values(idx): + result = MultiIndex.from_tuples(idx) + assert (result.values == idx.values).all() + + +def test_tuples_with_name_string(): + # GH 15110 and GH 14848 + + li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] + msg = "Names should be list-like for a MultiIndex" + with pytest.raises(ValueError, match=msg): + Index(li, name="abc") + with pytest.raises(ValueError, match=msg): + Index(li, name="a") + + +def test_from_tuples_with_tuple_label(): + # GH 15457 + expected = pd.DataFrame( + [[2, 1, 2], [4, (1, 2), 3]], columns=["a", "b", "c"] + ).set_index(["a", "b"]) + idx = MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=("a", "b")) + result = pd.DataFrame([2, 3], columns=["c"], index=idx) + tm.assert_frame_equal(expected, result) + + +# ---------------------------------------------------------------------------- +# from_product +# ---------------------------------------------------------------------------- +def test_from_product_empty_zero_levels(): + # 0 levels + msg = "Must pass non-zero number of levels/codes" + with pytest.raises(ValueError, match=msg): + MultiIndex.from_product([]) + + +def test_from_product_empty_one_level(): + result = MultiIndex.from_product([[]], names=["A"]) + expected = Index([], name="A") + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["A"] + + +@pytest.mark.parametrize( + "first, second", [([], []), (["foo", "bar", "baz"], []), ([], ["a", "b", "c"])] +) +def test_from_product_empty_two_levels(first, second): + names = ["A", "B"] + result = MultiIndex.from_product([first, second], names=names) + expected = MultiIndex(levels=[first, second], codes=[[], []], names=names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("N", list(range(4))) +def test_from_product_empty_three_levels(N): + # GH12258 + names = ["A", "B", "C"] + lvl2 = list(range(N)) + result = MultiIndex.from_product([[], lvl2, []], names=names) + expected = MultiIndex(levels=[[], lvl2, []], codes=[[], [], []], names=names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "invalid_input", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]] +) +def test_from_product_invalid_input(invalid_input): + msg = r"Input must be a list / sequence of iterables|Input must be list-like" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_product(iterables=invalid_input) + + +def test_from_product_datetimeindex(): + dt_index = date_range("2000-01-01", periods=2) + mi = MultiIndex.from_product([[1, 2], dt_index]) + etalon = construct_1d_object_array_from_listlike( + [ + (1, Timestamp("2000-01-01")), + (1, Timestamp("2000-01-02")), + (2, Timestamp("2000-01-01")), + (2, Timestamp("2000-01-02")), + ] + ) + tm.assert_numpy_array_equal(mi.values, etalon) + + +def test_from_product_rangeindex(): + # RangeIndex is preserved by factorize, so preserved in levels + rng = Index(range(5)) + other = ["a", "b"] + mi = MultiIndex.from_product([rng, other]) + tm.assert_index_equal(mi._levels[0], rng, exact=True) + + +@pytest.mark.parametrize("ordered", [False, True]) +@pytest.mark.parametrize("f", [lambda x: x, lambda x: Series(x), lambda x: x.values]) +def test_from_product_index_series_categorical(ordered, f): + # 
GH13743 + first = ["foo", "bar"] + + idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=ordered) + expected = pd.CategoricalIndex( + list("abcaab") + list("abcaab"), categories=list("bac"), ordered=ordered + ) + + result = MultiIndex.from_product([first, f(idx)]) + tm.assert_index_equal(result.get_level_values(1), expected) + + +def test_from_product(): + first = ["foo", "bar", "buz"] + second = ["a", "b", "c"] + names = ["first", "second"] + result = MultiIndex.from_product([first, second], names=names) + + tuples = [ + ("foo", "a"), + ("foo", "b"), + ("foo", "c"), + ("bar", "a"), + ("bar", "b"), + ("bar", "c"), + ("buz", "a"), + ("buz", "b"), + ("buz", "c"), + ] + expected = MultiIndex.from_tuples(tuples, names=names) + + tm.assert_index_equal(result, expected) + + +def test_from_product_iterator(): + # GH 18434 + first = ["foo", "bar", "buz"] + second = ["a", "b", "c"] + names = ["first", "second"] + tuples = [ + ("foo", "a"), + ("foo", "b"), + ("foo", "c"), + ("bar", "a"), + ("bar", "b"), + ("bar", "c"), + ("buz", "a"), + ("buz", "b"), + ("buz", "c"), + ] + expected = MultiIndex.from_tuples(tuples, names=names) + + # iterator as input + result = MultiIndex.from_product(iter([first, second]), names=names) + tm.assert_index_equal(result, expected) + + # Invalid non-iterable input + msg = "Input must be a list / sequence of iterables." + with pytest.raises(TypeError, match=msg): + MultiIndex.from_product(0) + + +@pytest.mark.parametrize( + "a, b, expected_names", + [ + ( + Series([1, 2, 3], name="foo"), + Series(["a", "b"], name="bar"), + ["foo", "bar"], + ), + (Series([1, 2, 3], name="foo"), ["a", "b"], ["foo", None]), + ([1, 2, 3], ["a", "b"], None), + ], +) +def test_from_product_infer_names(a, b, expected_names): + # GH27292 + result = MultiIndex.from_product([a, b]) + expected = MultiIndex( + levels=[[1, 2, 3], ["a", "b"]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + names=expected_names, + ) + tm.assert_index_equal(result, expected) + + +def test_from_product_respects_none_names(): + # GH27292 + a = Series([1, 2, 3], name="foo") + b = Series(["a", "b"], name="bar") + + result = MultiIndex.from_product([a, b], names=None) + expected = MultiIndex( + levels=[[1, 2, 3], ["a", "b"]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + names=None, + ) + tm.assert_index_equal(result, expected) + + +def test_from_product_readonly(): + # GH#15286 passing read-only array to from_product + a = np.array(range(3)) + b = ["a", "b"] + expected = MultiIndex.from_product([a, b]) + + a.setflags(write=False) + result = MultiIndex.from_product([a, b]) + tm.assert_index_equal(result, expected) + + +def test_create_index_existing_name(idx): + # GH11193, when an existing index is passed, and a new name is not + # specified, the new index should inherit the previous object name + index = idx + index.names = ["foo", "bar"] + result = Index(index) + expected = Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ) + ) + tm.assert_index_equal(result, expected) + + result = Index(index, name="A") + expected = Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ), + name="A", + ) + tm.assert_index_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# from_frame +# 
---------------------------------------------------------------------------- +def test_from_frame(): + # GH 22420 + df = pd.DataFrame( + [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], columns=["L1", "L2"] + ) + expected = MultiIndex.from_tuples( + [("a", "a"), ("a", "b"), ("b", "a"), ("b", "b")], names=["L1", "L2"] + ) + result = MultiIndex.from_frame(df) + tm.assert_index_equal(expected, result) + + +def test_from_frame_missing_values_multiIndex(): + # GH 39984 + pa = pytest.importorskip("pyarrow") + + df = pd.DataFrame( + { + "a": Series([1, 2, None], dtype="Int64"), + "b": pd.Float64Dtype().__from_arrow__(pa.array([0.2, np.nan, None])), + } + ) + multi_indexed = MultiIndex.from_frame(df) + expected = MultiIndex.from_arrays( + [ + Series([1, 2, None]).astype("Int64"), + pd.Float64Dtype().__from_arrow__(pa.array([0.2, np.nan, None])), + ], + names=["a", "b"], + ) + tm.assert_index_equal(multi_indexed, expected) + + +@pytest.mark.parametrize( + "non_frame", + [ + Series([1, 2, 3, 4]), + [1, 2, 3, 4], + [[1, 2], [3, 4], [5, 6]], + Index([1, 2, 3, 4]), + np.array([[1, 2], [3, 4], [5, 6]]), + 27, + ], +) +def test_from_frame_error(non_frame): + # GH 22420 + with pytest.raises(TypeError, match="Input must be a DataFrame"): + MultiIndex.from_frame(non_frame) + + +def test_from_frame_dtype_fidelity(): + # GH 22420 + df = pd.DataFrame( + { + "dates": date_range("19910905", periods=6, tz="US/Eastern"), + "a": [1, 1, 1, 2, 2, 2], + "b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + "c": ["x", "x", "y", "z", "x", "y"], + } + ) + original_dtypes = df.dtypes.to_dict() + + expected_mi = MultiIndex.from_arrays( + [ + date_range("19910905", periods=6, tz="US/Eastern"), + [1, 1, 1, 2, 2, 2], + pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + ["x", "x", "y", "z", "x", "y"], + ], + names=["dates", "a", "b", "c"], + ) + mi = MultiIndex.from_frame(df) + mi_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)} + + tm.assert_index_equal(expected_mi, mi) + assert original_dtypes == mi_dtypes + + +@pytest.mark.parametrize( + "names_in,names_out", [(None, [("L1", "x"), ("L2", "y")]), (["x", "y"], ["x", "y"])] +) +def test_from_frame_valid_names(names_in, names_out): + # GH 22420 + df = pd.DataFrame( + [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], + columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]), + ) + mi = MultiIndex.from_frame(df, names=names_in) + assert mi.names == names_out + + +@pytest.mark.parametrize( + "names,expected_error_msg", + [ + ("bad_input", "Names should be list-like for a MultiIndex"), + (["a", "b", "c"], "Length of names must match number of levels in MultiIndex"), + ], +) +def test_from_frame_invalid_names(names, expected_error_msg): + # GH 22420 + df = pd.DataFrame( + [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], + columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]), + ) + with pytest.raises(ValueError, match=expected_error_msg): + MultiIndex.from_frame(df, names=names) + + +def test_index_equal_empty_iterable(): + # #16844 + a = MultiIndex(levels=[[], []], codes=[[], []], names=["a", "b"]) + b = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"]) + tm.assert_index_equal(a, b) + + +def test_raise_invalid_sortorder(): + # Test that the MultiIndex constructor raise when a incorrect sortorder is given + # GH#28518 + + levels = [[0, 1], [0, 1, 2]] + + # Correct sortorder + MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2 + ) + + with pytest.raises(ValueError, match=r".* 
sortorder 2 with lexsort_depth 1.*"): + MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2 + ) + + with pytest.raises(ValueError, match=r".* sortorder 1 with lexsort_depth 0.*"): + MultiIndex( + levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1 + ) + + +def test_datetimeindex(): + idx1 = pd.DatetimeIndex( + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo" + ) + idx2 = date_range("2010/01/01", periods=6, freq="ME", tz="US/Eastern") + idx = MultiIndex.from_arrays([idx1, idx2]) + + expected1 = pd.DatetimeIndex( + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"], tz="Asia/Tokyo" + ) + + tm.assert_index_equal(idx.levels[0], expected1) + tm.assert_index_equal(idx.levels[1], idx2) + + # from datetime combos + # GH 7888 + date1 = np.datetime64("today") + date2 = datetime.today() + date3 = Timestamp.today() + + for d1, d2 in itertools.product([date1, date2, date3], [date1, date2, date3]): + index = MultiIndex.from_product([[d1], [d2]]) + assert isinstance(index.levels[0], pd.DatetimeIndex) + assert isinstance(index.levels[1], pd.DatetimeIndex) + + # but NOT date objects, matching Index behavior + date4 = date.today() + index = MultiIndex.from_product([[date4], [date2]]) + assert not isinstance(index.levels[0], pd.DatetimeIndex) + assert isinstance(index.levels[1], pd.DatetimeIndex) + + +def test_constructor_with_tz(): + index = pd.DatetimeIndex( + ["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific" + ) + columns = pd.DatetimeIndex( + ["2014/01/01 09:00", "2014/01/02 09:00"], name="dt2", tz="Asia/Tokyo" + ) + + result = MultiIndex.from_arrays([index, columns]) + + assert result.names == ["dt1", "dt2"] + tm.assert_index_equal(result.levels[0], index) + tm.assert_index_equal(result.levels[1], columns) + + result = MultiIndex.from_arrays([Series(index), Series(columns)]) + + assert result.names == ["dt1", "dt2"] + tm.assert_index_equal(result.levels[0], index) + tm.assert_index_equal(result.levels[1], columns) + + +def test_multiindex_inference_consistency(): + # check that inference behavior matches the base class + + v = date.today() + + arr = [v, v] + + idx = Index(arr) + assert idx.dtype == object + + mi = MultiIndex.from_arrays([arr]) + lev = mi.levels[0] + assert lev.dtype == object + + mi = MultiIndex.from_product([arr]) + lev = mi.levels[0] + assert lev.dtype == object + + mi = MultiIndex.from_tuples([(x,) for x in arr]) + lev = mi.levels[0] + assert lev.dtype == object + + +def test_dtype_representation(using_infer_string): + # GH#46900 + pmidx = MultiIndex.from_arrays([[1], ["a"]], names=[("a", "b"), ("c", "d")]) + result = pmidx.dtypes + exp = "object" if not using_infer_string else "string" + expected = Series( + ["int64", exp], + index=MultiIndex.from_tuples([("a", "b"), ("c", "d")]), + dtype=object, + ) + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_conversion.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..3c2ca045d6f990837fae4d2b3d7bcbbf40e175e9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_conversion.py @@ -0,0 +1,164 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, +) +import pandas._testing as tm + + +def test_to_numpy(idx): + result = idx.to_numpy() + exp = idx.values 
+ tm.assert_numpy_array_equal(result, exp) + + +def test_to_frame(): + tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")] + + index = MultiIndex.from_tuples(tuples) + result = index.to_frame(index=False) + expected = DataFrame(tuples) + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")] + index = MultiIndex.from_tuples(tuples, names=["first", "second"]) + result = index.to_frame(index=False) + expected = DataFrame(tuples) + expected.columns = ["first", "second"] + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + # See GH-22580 + index = MultiIndex.from_tuples(tuples) + result = index.to_frame(index=False, name=["first", "second"]) + expected = DataFrame(tuples) + expected.columns = ["first", "second"] + tm.assert_frame_equal(result, expected) + + result = index.to_frame(name=["first", "second"]) + expected.index = index + expected.columns = ["first", "second"] + tm.assert_frame_equal(result, expected) + + msg = "'name' must be a list / sequence of column names." + with pytest.raises(TypeError, match=msg): + index.to_frame(name="first") + + msg = "'name' should have same length as number of levels on index." + with pytest.raises(ValueError, match=msg): + index.to_frame(name=["first"]) + + # Tests for datetime index + index = MultiIndex.from_product([range(5), pd.date_range("20130101", periods=3)]) + result = index.to_frame(index=False) + expected = DataFrame( + { + 0: np.repeat(np.arange(5, dtype="int64"), 3), + 1: np.tile(pd.date_range("20130101", periods=3), 5), + } + ) + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + # See GH-22580 + result = index.to_frame(index=False, name=["first", "second"]) + expected = DataFrame( + { + "first": np.repeat(np.arange(5, dtype="int64"), 3), + "second": np.tile(pd.date_range("20130101", periods=3), 5), + } + ) + tm.assert_frame_equal(result, expected) + + result = index.to_frame(name=["first", "second"]) + expected.index = index + tm.assert_frame_equal(result, expected) + + +def test_to_frame_dtype_fidelity(): + # GH 22420 + mi = MultiIndex.from_arrays( + [ + pd.date_range("19910905", periods=6, tz="US/Eastern"), + [1, 1, 1, 2, 2, 2], + pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + ["x", "x", "y", "z", "x", "y"], + ], + names=["dates", "a", "b", "c"], + ) + original_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)} + + expected_df = DataFrame( + { + "dates": pd.date_range("19910905", periods=6, tz="US/Eastern"), + "a": [1, 1, 1, 2, 2, 2], + "b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + "c": ["x", "x", "y", "z", "x", "y"], + } + ) + df = mi.to_frame(index=False) + df_dtypes = df.dtypes.to_dict() + + tm.assert_frame_equal(df, expected_df) + assert original_dtypes == df_dtypes + + +def test_to_frame_resulting_column_order(): + # GH 22420 + expected = ["z", 0, "a"] + mi = MultiIndex.from_arrays( + [["a", "b", "c"], ["x", "y", "z"], ["q", "w", "e"]], names=expected + ) + result = mi.to_frame().columns.tolist() + assert result == expected + + +def test_to_frame_duplicate_labels(): + # GH 45245 + data = [(1, 2), (3, 4)] + names = ["a", "a"] + index = MultiIndex.from_tuples(data, names=names) + with pytest.raises(ValueError, match="Cannot create duplicate column 
labels"): + index.to_frame() + + result = index.to_frame(allow_duplicates=True) + expected = DataFrame(data, index=index, columns=names) + tm.assert_frame_equal(result, expected) + + names = [None, 0] + index = MultiIndex.from_tuples(data, names=names) + with pytest.raises(ValueError, match="Cannot create duplicate column labels"): + index.to_frame() + + result = index.to_frame(allow_duplicates=True) + expected = DataFrame(data, index=index, columns=[0, 0]) + tm.assert_frame_equal(result, expected) + + +def test_to_flat_index(idx): + expected = pd.Index( + ( + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ), + tupleize_cols=False, + ) + result = idx.to_flat_index() + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_copy.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_copy.py new file mode 100644 index 0000000000000000000000000000000000000000..2e09a580f9528bc8197d55c6a7533098e0129fa2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_copy.py @@ -0,0 +1,96 @@ +from copy import ( + copy, + deepcopy, +) + +import pytest + +from pandas import MultiIndex +import pandas._testing as tm + + +def assert_multiindex_copied(copy, original): + # Levels should be (at least, shallow copied) + tm.assert_copy(copy.levels, original.levels) + tm.assert_almost_equal(copy.codes, original.codes) + + # Labels doesn't matter which way copied + tm.assert_almost_equal(copy.codes, original.codes) + assert copy.codes is not original.codes + + # Names doesn't matter which way copied + assert copy.names == original.names + assert copy.names is not original.names + + # Sort order should be copied + assert copy.sortorder == original.sortorder + + +def test_copy(idx): + i_copy = idx.copy() + + assert_multiindex_copied(i_copy, idx) + + +def test_shallow_copy(idx): + i_copy = idx._view() + + assert_multiindex_copied(i_copy, idx) + + +def test_view(idx): + i_view = idx.view() + assert_multiindex_copied(i_view, idx) + + +@pytest.mark.parametrize("func", [copy, deepcopy]) +def test_copy_and_deepcopy(func): + idx = MultiIndex( + levels=[["foo", "bar"], ["fizz", "buzz"]], + codes=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=["first", "second"], + ) + idx_copy = func(idx) + assert idx_copy is not idx + assert idx_copy.equals(idx) + + +@pytest.mark.parametrize("deep", [True, False]) +def test_copy_method(deep): + idx = MultiIndex( + levels=[["foo", "bar"], ["fizz", "buzz"]], + codes=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=["first", "second"], + ) + idx_copy = idx.copy(deep=deep) + assert idx_copy.equals(idx) + + +@pytest.mark.parametrize("deep", [True, False]) +@pytest.mark.parametrize( + "kwarg, value", + [ + ("names", ["third", "fourth"]), + ], +) +def test_copy_method_kwargs(deep, kwarg, value): + # gh-12309: Check that the "name" argument as well other kwargs are honored + idx = MultiIndex( + levels=[["foo", "bar"], ["fizz", "buzz"]], + codes=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=["first", "second"], + ) + idx_copy = idx.copy(**{kwarg: value, "deep": deep}) + assert getattr(idx_copy, kwarg) == value + + +def test_copy_deep_false_retains_id(): + # GH#47878 + idx = MultiIndex( + levels=[["foo", "bar"], ["fizz", "buzz"]], + codes=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=["first", "second"], + ) + + res = idx.copy(deep=False) + assert res._id is idx._id diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_drop.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_drop.py new file mode 100644 index 0000000000000000000000000000000000000000..99c8ebb1e57b22059d5a545a79de7b8348d73b14 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_drop.py @@ -0,0 +1,190 @@ +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + Index, + MultiIndex, +) +import pandas._testing as tm + + +def test_drop(idx): + dropped = idx.drop([("foo", "two"), ("qux", "one")]) + + index = MultiIndex.from_tuples([("foo", "two"), ("qux", "one")]) + dropped2 = idx.drop(index) + + expected = idx[[0, 2, 3, 5]] + tm.assert_index_equal(dropped, expected) + tm.assert_index_equal(dropped2, expected) + + dropped = idx.drop(["bar"]) + expected = idx[[0, 1, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + dropped = idx.drop("foo") + expected = idx[[2, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + index = MultiIndex.from_tuples([("bar", "two")]) + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): + idx.drop([("bar", "two")]) + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): + idx.drop(index) + with pytest.raises(KeyError, match=r"^'two'$"): + idx.drop(["foo", "two"]) + + # partially correct argument + mixed_index = MultiIndex.from_tuples([("qux", "one"), ("bar", "two")]) + with pytest.raises(KeyError, match=r"^\('bar', 'two'\)$"): + idx.drop(mixed_index) + + # error='ignore' + dropped = idx.drop(index, errors="ignore") + expected = idx[[0, 1, 2, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + dropped = idx.drop(mixed_index, errors="ignore") + expected = idx[[0, 1, 2, 3, 5]] + tm.assert_index_equal(dropped, expected) + + dropped = idx.drop(["foo", "two"], errors="ignore") + expected = idx[[2, 3, 4, 5]] + tm.assert_index_equal(dropped, expected) + + # mixed partial / full drop + dropped = idx.drop(["foo", ("qux", "one")]) + expected = idx[[2, 3, 5]] + tm.assert_index_equal(dropped, expected) + + # mixed partial / full drop / error='ignore' + mixed_index = ["foo", ("qux", "one"), "two"] + with pytest.raises(KeyError, match=r"^'two'$"): + idx.drop(mixed_index) + dropped = idx.drop(mixed_index, errors="ignore") + expected = idx[[2, 3, 5]] + tm.assert_index_equal(dropped, expected) + + +def test_droplevel_with_names(idx): + index = idx[idx.get_loc("foo")] + dropped = index.droplevel(0) + assert dropped.name == "second" + + index = MultiIndex( + levels=[Index(range(4)), Index(range(4)), Index(range(4))], + codes=[ + np.array([0, 0, 1, 2, 2, 2, 3, 3]), + np.array([0, 1, 0, 0, 0, 1, 0, 1]), + np.array([1, 0, 1, 1, 0, 0, 1, 0]), + ], + names=["one", "two", "three"], + ) + dropped = index.droplevel(0) + assert dropped.names == ("two", "three") + + dropped = index.droplevel("two") + expected = index.droplevel(1) + assert dropped.equals(expected) + + +def test_droplevel_list(): + index = MultiIndex( + levels=[Index(range(4)), Index(range(4)), Index(range(4))], + codes=[ + np.array([0, 0, 1, 2, 2, 2, 3, 3]), + np.array([0, 1, 0, 0, 0, 1, 0, 1]), + np.array([1, 0, 1, 1, 0, 0, 1, 0]), + ], + names=["one", "two", "three"], + ) + + dropped = index[:2].droplevel(["three", "one"]) + expected = index[:2].droplevel(2).droplevel(0) + assert dropped.equals(expected) + + dropped = index[:2].droplevel([]) + expected = index[:2] + assert dropped.equals(expected) + + msg = ( + "Cannot remove 3 levels from an 
index with 3 levels: " + "at least one level must be left" + ) + with pytest.raises(ValueError, match=msg): + index[:2].droplevel(["one", "two", "three"]) + + with pytest.raises(KeyError, match="'Level four not found'"): + index[:2].droplevel(["one", "four"]) + + +def test_drop_not_lexsorted(): + # GH 12078 + + # define the lexsorted version of the multi-index + tuples = [("a", ""), ("b1", "c1"), ("b2", "c2")] + lexsorted_mi = MultiIndex.from_tuples(tuples, names=["b", "c"]) + assert lexsorted_mi._is_lexsorted() + + # and the not-lexsorted version + df = pd.DataFrame( + columns=["a", "b", "c", "d"], data=[[1, "b1", "c1", 3], [1, "b2", "c2", 4]] + ) + df = df.pivot_table(index="a", columns=["b", "c"], values="d") + df = df.reset_index() + not_lexsorted_mi = df.columns + assert not not_lexsorted_mi._is_lexsorted() + + # compare the results + tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi) + with tm.assert_produces_warning(PerformanceWarning): + tm.assert_index_equal(lexsorted_mi.drop("a"), not_lexsorted_mi.drop("a")) + + +def test_drop_with_nan_in_index(nulls_fixture): + # GH#18853 + mi = MultiIndex.from_tuples([("blah", nulls_fixture)], names=["name", "date"]) + msg = r"labels \[Timestamp\('2001-01-01 00:00:00'\)\] not found in level" + with pytest.raises(KeyError, match=msg): + mi.drop(pd.Timestamp("2001"), level="date") + + +@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") +def test_drop_with_non_monotonic_duplicates(): + # GH#33494 + mi = MultiIndex.from_tuples([(1, 2), (2, 3), (1, 2)]) + result = mi.drop((1, 2)) + expected = MultiIndex.from_tuples([(2, 3)]) + tm.assert_index_equal(result, expected) + + +def test_single_level_drop_partially_missing_elements(): + # GH 37820 + + mi = MultiIndex.from_tuples([(1, 2), (2, 2), (3, 2)]) + msg = r"labels \[4\] not found in level" + with pytest.raises(KeyError, match=msg): + mi.drop(4, level=0) + with pytest.raises(KeyError, match=msg): + mi.drop([1, 4], level=0) + msg = r"labels \[nan\] not found in level" + with pytest.raises(KeyError, match=msg): + mi.drop([np.nan], level=0) + with pytest.raises(KeyError, match=msg): + mi.drop([np.nan, 1, 2, 3], level=0) + + mi = MultiIndex.from_tuples([(np.nan, 1), (1, 2)]) + msg = r"labels \['a'\] not found in level" + with pytest.raises(KeyError, match=msg): + mi.drop([np.nan, 1, "a"], level=0) + + +def test_droplevel_multiindex_one_level(): + # GH#37208 + index = MultiIndex.from_tuples([(2,)], names=("b",)) + result = index.droplevel([]) + expected = Index([2], name="b") + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_equivalence.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_equivalence.py new file mode 100644 index 0000000000000000000000000000000000000000..9babbd5b8d56d64d704978758efb81f8d730274f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_equivalence.py @@ -0,0 +1,284 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_any_real_numeric_dtype + +import pandas as pd +from pandas import ( + Index, + MultiIndex, + Series, +) +import pandas._testing as tm + + +def test_equals(idx): + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + assert idx.equals(idx.to_flat_index()) + assert idx.equals(idx.to_flat_index().astype("category")) + + assert not idx.equals(list(idx)) + assert not idx.equals(np.array(idx)) + + same_values = Index(idx, dtype=object) 
+ assert idx.equals(same_values) + assert same_values.equals(idx) + + if idx.nlevels == 1: + # do not test MultiIndex + assert not idx.equals(Series(idx)) + + +def test_equals_op(idx): + # GH9947, GH10637 + index_a = idx + + n = len(index_a) + index_b = index_a[0:-1] + index_c = index_a[0:-1].append(index_a[-2:-1]) + index_d = index_a[0:1] + with pytest.raises(ValueError, match="Lengths must match"): + index_a == index_b + expected1 = np.array([True] * n) + expected2 = np.array([True] * (n - 1) + [False]) + tm.assert_numpy_array_equal(index_a == index_a, expected1) + tm.assert_numpy_array_equal(index_a == index_c, expected2) + + # test comparisons with numpy arrays + array_a = np.array(index_a) + array_b = np.array(index_a[0:-1]) + array_c = np.array(index_a[0:-1].append(index_a[-2:-1])) + array_d = np.array(index_a[0:1]) + with pytest.raises(ValueError, match="Lengths must match"): + index_a == array_b + tm.assert_numpy_array_equal(index_a == array_a, expected1) + tm.assert_numpy_array_equal(index_a == array_c, expected2) + + # test comparisons with Series + series_a = Series(array_a) + series_b = Series(array_b) + series_c = Series(array_c) + series_d = Series(array_d) + with pytest.raises(ValueError, match="Lengths must match"): + index_a == series_b + + tm.assert_numpy_array_equal(index_a == series_a, expected1) + tm.assert_numpy_array_equal(index_a == series_c, expected2) + + # cases where length is 1 for one of them + with pytest.raises(ValueError, match="Lengths must match"): + index_a == index_d + with pytest.raises(ValueError, match="Lengths must match"): + index_a == series_d + with pytest.raises(ValueError, match="Lengths must match"): + index_a == array_d + msg = "Can only compare identically-labeled Series objects" + with pytest.raises(ValueError, match=msg): + series_a == series_d + with pytest.raises(ValueError, match="Lengths must match"): + series_a == array_d + + # comparing with a scalar should broadcast; note that we are excluding + # MultiIndex because in this case each item in the index is a tuple of + # length 2, and therefore is considered an array of length 2 in the + # comparison instead of a scalar + if not isinstance(index_a, MultiIndex): + expected3 = np.array([False] * (len(index_a) - 2) + [True, False]) + # assuming the 2nd to last item is unique in the data + item = index_a[-2] + tm.assert_numpy_array_equal(index_a == item, expected3) + tm.assert_series_equal(series_a == item, Series(expected3)) + + +def test_compare_tuple(): + # GH#21517 + mi = MultiIndex.from_product([[1, 2]] * 2) + + all_false = np.array([False, False, False, False]) + + result = mi == mi[0] + expected = np.array([True, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = mi != mi[0] + tm.assert_numpy_array_equal(result, ~expected) + + result = mi < mi[0] + tm.assert_numpy_array_equal(result, all_false) + + result = mi <= mi[0] + tm.assert_numpy_array_equal(result, expected) + + result = mi > mi[0] + tm.assert_numpy_array_equal(result, ~expected) + + result = mi >= mi[0] + tm.assert_numpy_array_equal(result, ~all_false) + + +def test_compare_tuple_strs(): + # GH#34180 + + mi = MultiIndex.from_tuples([("a", "b"), ("b", "c"), ("c", "a")]) + + result = mi == ("c", "a") + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = mi == ("c",) + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + +def test_equals_multi(idx): + assert idx.equals(idx) + assert not 
idx.equals(idx.values) + assert idx.equals(Index(idx.values)) + + assert idx.equal_levels(idx) + assert not idx.equals(idx[:-1]) + assert not idx.equals(idx[-1]) + + # different number of levels + index = MultiIndex( + levels=[Index(list(range(4))), Index(list(range(4))), Index(list(range(4)))], + codes=[ + np.array([0, 0, 1, 2, 2, 2, 3, 3]), + np.array([0, 1, 0, 0, 0, 1, 0, 1]), + np.array([1, 0, 1, 1, 0, 0, 1, 0]), + ], + ) + + index2 = MultiIndex(levels=index.levels[:-1], codes=index.codes[:-1]) + assert not index.equals(index2) + assert not index.equal_levels(index2) + + # levels are different + major_axis = Index(list(range(4))) + minor_axis = Index(list(range(2))) + + major_codes = np.array([0, 0, 1, 2, 2, 3]) + minor_codes = np.array([0, 1, 0, 0, 1, 0]) + + index = MultiIndex( + levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] + ) + assert not idx.equals(index) + assert not idx.equal_levels(index) + + # some of the labels are different + major_axis = Index(["foo", "bar", "baz", "qux"]) + minor_axis = Index(["one", "two"]) + + major_codes = np.array([0, 0, 2, 2, 3, 3]) + minor_codes = np.array([0, 1, 0, 1, 0, 1]) + + index = MultiIndex( + levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] + ) + assert not idx.equals(index) + + +def test_identical(idx): + mi = idx.copy() + mi2 = idx.copy() + assert mi.identical(mi2) + + mi = mi.set_names(["new1", "new2"]) + assert mi.equals(mi2) + assert not mi.identical(mi2) + + mi2 = mi2.set_names(["new1", "new2"]) + assert mi.identical(mi2) + + mi4 = Index(mi.tolist(), tupleize_cols=False) + assert not mi.identical(mi4) + assert mi.equals(mi4) + + +def test_equals_operator(idx): + # GH9785 + assert (idx == idx).all() + + +def test_equals_missing_values(): + # make sure take is not using -1 + i = MultiIndex.from_tuples([(0, pd.NaT), (0, pd.Timestamp("20130101"))]) + result = i[0:1].equals(i[0]) + assert not result + result = i[1:2].equals(i[1]) + assert not result + + +def test_equals_missing_values_differently_sorted(): + # GH#38439 + mi1 = MultiIndex.from_tuples([(81.0, np.nan), (np.nan, np.nan)]) + mi2 = MultiIndex.from_tuples([(np.nan, np.nan), (81.0, np.nan)]) + assert not mi1.equals(mi2) + + mi2 = MultiIndex.from_tuples([(81.0, np.nan), (np.nan, np.nan)]) + assert mi1.equals(mi2) + + +def test_is_(): + mi = MultiIndex.from_tuples(zip(range(10), range(10))) + assert mi.is_(mi) + assert mi.is_(mi.view()) + assert mi.is_(mi.view().view().view().view()) + mi2 = mi.view() + # names are metadata, they don't change id + mi2.names = ["A", "B"] + assert mi2.is_(mi) + assert mi.is_(mi2) + + assert not mi.is_(mi.set_names(["C", "D"])) + # levels are inherent properties, they change identity + mi3 = mi2.set_levels([list(range(10)), list(range(10))]) + assert not mi3.is_(mi2) + # shouldn't change + assert mi2.is_(mi) + mi4 = mi3.view() + + # GH 17464 - Remove duplicate MultiIndex levels + mi4 = mi4.set_levels([list(range(10)), list(range(10))]) + assert not mi4.is_(mi3) + mi5 = mi.view() + mi5 = mi5.set_levels(mi5.levels) + assert not mi5.is_(mi) + + +def test_is_all_dates(idx): + assert not idx._is_all_dates + + +def test_is_numeric(idx): + # MultiIndex is never numeric + assert not is_any_real_numeric_dtype(idx) + + +def test_multiindex_compare(): + # GH 21149 + # Ensure comparison operations for MultiIndex with nlevels == 1 + # behave consistently with those for MultiIndex with nlevels > 1 + + midx = MultiIndex.from_product([[0, 1]]) + + # Equality self-test: MultiIndex object vs self + expected = Series([True, True]) + 
result = Series(midx == midx) + tm.assert_series_equal(result, expected) + + # Greater than comparison: MultiIndex object vs self + expected = Series([False, False]) + result = Series(midx > midx) + tm.assert_series_equal(result, expected) + + +def test_equals_ea_int_regular_int(): + # GH#46026 + mi1 = MultiIndex.from_arrays([Index([1, 2], dtype="Int64"), [3, 4]]) + mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]]) + assert not mi1.equals(mi2) + assert not mi2.equals(mi1) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_formats.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..52ff3109128f24f43d9a12527d08770b463459a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_formats.py @@ -0,0 +1,249 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + MultiIndex, +) +import pandas._testing as tm + + +def test_format(idx): + msg = "MultiIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + idx.format() + idx[:0].format() + + +def test_format_integer_names(): + index = MultiIndex( + levels=[[0, 1], [0, 1]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1] + ) + msg = "MultiIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + index.format(names=True) + + +def test_format_sparse_config(idx): + # GH1538 + msg = "MultiIndex.format is deprecated" + with pd.option_context("display.multi_sparse", False): + with tm.assert_produces_warning(FutureWarning, match=msg): + result = idx.format() + assert result[1] == "foo two" + + +def test_format_sparse_display(): + index = MultiIndex( + levels=[[0, 1], [0, 1], [0, 1], [0]], + codes=[ + [0, 0, 0, 1, 1, 1], + [0, 0, 1, 0, 0, 1], + [0, 1, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 0], + ], + ) + msg = "MultiIndex.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = index.format() + assert result[3] == "1 0 0 0" + + +def test_repr_with_unicode_data(): + with pd.option_context("display.encoding", "UTF-8"): + d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} + index = pd.DataFrame(d).set_index(["a", "b"]).index + assert "\\" not in repr(index) # we don't want unicode-escaped + + +def test_repr_roundtrip_raises(): + mi = MultiIndex.from_product([list("ab"), range(3)], names=["first", "second"]) + msg = "Must pass both levels and codes" + with pytest.raises(TypeError, match=msg): + eval(repr(mi)) + + +def test_unicode_string_with_unicode(): + d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} + idx = pd.DataFrame(d).set_index(["a", "b"]).index + str(idx) + + +def test_repr_max_seq_item_setting(idx): + # GH10182 + idx = idx.repeat(50) + with pd.option_context("display.max_seq_items", None): + repr(idx) + assert "..." 
not in str(idx) + + +class TestRepr: + def test_unicode_repr_issues(self): + levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])] + codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)] + index = MultiIndex(levels=levels, codes=codes) + + repr(index.levels) + repr(index.get_level_values(1)) + + def test_repr_max_seq_items_equal_to_n(self, idx): + # display.max_seq_items == n + with pd.option_context("display.max_seq_items", 6): + result = idx.__repr__() + expected = """\ +MultiIndex([('foo', 'one'), + ('foo', 'two'), + ('bar', 'one'), + ('baz', 'two'), + ('qux', 'one'), + ('qux', 'two')], + names=['first', 'second'])""" + assert result == expected + + def test_repr(self, idx): + result = idx[:1].__repr__() + expected = """\ +MultiIndex([('foo', 'one')], + names=['first', 'second'])""" + assert result == expected + + result = idx.__repr__() + expected = """\ +MultiIndex([('foo', 'one'), + ('foo', 'two'), + ('bar', 'one'), + ('baz', 'two'), + ('qux', 'one'), + ('qux', 'two')], + names=['first', 'second'])""" + assert result == expected + + with pd.option_context("display.max_seq_items", 5): + result = idx.__repr__() + expected = """\ +MultiIndex([('foo', 'one'), + ('foo', 'two'), + ... + ('qux', 'one'), + ('qux', 'two')], + names=['first', 'second'], length=6)""" + assert result == expected + + # display.max_seq_items == 1 + with pd.option_context("display.max_seq_items", 1): + result = idx.__repr__() + expected = """\ +MultiIndex([... + ('qux', 'two')], + names=['first', ...], length=6)""" + assert result == expected + + def test_rjust(self): + n = 1000 + ci = pd.CategoricalIndex(list("a" * n) + (["abc"] * n)) + dti = pd.date_range("2000-01-01", freq="s", periods=n * 2) + mi = MultiIndex.from_arrays([ci, ci.codes + 9, dti], names=["a", "b", "dti"]) + result = mi[:1].__repr__() + expected = """\ +MultiIndex([('a', 9, '2000-01-01 00:00:00')], + names=['a', 'b', 'dti'])""" + assert result == expected + + result = mi[::500].__repr__() + expected = """\ +MultiIndex([( 'a', 9, '2000-01-01 00:00:00'), + ( 'a', 9, '2000-01-01 00:08:20'), + ('abc', 10, '2000-01-01 00:16:40'), + ('abc', 10, '2000-01-01 00:25:00')], + names=['a', 'b', 'dti'])""" + assert result == expected + + result = mi.__repr__() + expected = """\ +MultiIndex([( 'a', 9, '2000-01-01 00:00:00'), + ( 'a', 9, '2000-01-01 00:00:01'), + ( 'a', 9, '2000-01-01 00:00:02'), + ( 'a', 9, '2000-01-01 00:00:03'), + ( 'a', 9, '2000-01-01 00:00:04'), + ( 'a', 9, '2000-01-01 00:00:05'), + ( 'a', 9, '2000-01-01 00:00:06'), + ( 'a', 9, '2000-01-01 00:00:07'), + ( 'a', 9, '2000-01-01 00:00:08'), + ( 'a', 9, '2000-01-01 00:00:09'), + ... 
+ ('abc', 10, '2000-01-01 00:33:10'), + ('abc', 10, '2000-01-01 00:33:11'), + ('abc', 10, '2000-01-01 00:33:12'), + ('abc', 10, '2000-01-01 00:33:13'), + ('abc', 10, '2000-01-01 00:33:14'), + ('abc', 10, '2000-01-01 00:33:15'), + ('abc', 10, '2000-01-01 00:33:16'), + ('abc', 10, '2000-01-01 00:33:17'), + ('abc', 10, '2000-01-01 00:33:18'), + ('abc', 10, '2000-01-01 00:33:19')], + names=['a', 'b', 'dti'], length=2000)""" + assert result == expected + + def test_tuple_width(self): + n = 1000 + ci = pd.CategoricalIndex(list("a" * n) + (["abc"] * n)) + dti = pd.date_range("2000-01-01", freq="s", periods=n * 2) + levels = [ci, ci.codes + 9, dti, dti, dti] + names = ["a", "b", "dti_1", "dti_2", "dti_3"] + mi = MultiIndex.from_arrays(levels, names=names) + result = mi[:1].__repr__() + expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)], + names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" # noqa: E501 + assert result == expected + + result = mi[:10].__repr__() + expected = """\ +MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...), + ('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...), + ('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...), + ('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...), + ('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...), + ('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...), + ('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...), + ('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...), + ('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...), + ('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)], + names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])""" + assert result == expected + + result = mi.__repr__() + expected = """\ +MultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...), + ( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...), + ( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...), + ( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...), + ( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...), + ( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...), + ( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...), + ( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...), + ( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...), + ( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...), + ... 
+ ('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...), + ('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...), + ('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...), + ('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...), + ('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...), + ('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...), + ('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...), + ('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...), + ('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...), + ('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)], + names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)""" + assert result == expected + + def test_multiindex_long_element(self): + # Non-regression test towards GH#52960 + data = MultiIndex.from_tuples([("c" * 62,)]) + + expected = ( + "MultiIndex([('cccccccccccccccccccccccccccccccccccccccc" + "cccccccccccccccccccccc',)],\n )" + ) + assert str(data) == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_get_level_values.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_get_level_values.py new file mode 100644 index 0000000000000000000000000000000000000000..28c77e78924cbc35feed4ae838b81f6be38478b5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_get_level_values.py @@ -0,0 +1,124 @@ +import numpy as np + +import pandas as pd +from pandas import ( + CategoricalIndex, + Index, + MultiIndex, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestGetLevelValues: + def test_get_level_values_box_datetime64(self): + dates = date_range("1/1/2000", periods=4) + levels = [dates, [0, 1]] + codes = [[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]] + + index = MultiIndex(levels=levels, codes=codes) + + assert isinstance(index.get_level_values(0)[0], Timestamp) + + +def test_get_level_values(idx): + result = idx.get_level_values(0) + expected = Index(["foo", "foo", "bar", "baz", "qux", "qux"], name="first") + tm.assert_index_equal(result, expected) + assert result.name == "first" + + result = idx.get_level_values("first") + expected = idx.get_level_values(0) + tm.assert_index_equal(result, expected) + + # GH 10460 + index = MultiIndex( + levels=[CategoricalIndex(["A", "B"]), CategoricalIndex([1, 2, 3])], + codes=[np.array([0, 0, 0, 1, 1, 1]), np.array([0, 1, 2, 0, 1, 2])], + ) + + exp = CategoricalIndex(["A", "A", "A", "B", "B", "B"]) + tm.assert_index_equal(index.get_level_values(0), exp) + exp = CategoricalIndex([1, 2, 3, 1, 2, 3]) + tm.assert_index_equal(index.get_level_values(1), exp) + + +def test_get_level_values_all_na(): + # GH#17924 when level entirely consists of nan + arrays = [[np.nan, np.nan, np.nan], ["a", np.nan, 1]] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(0) + expected = Index([np.nan, np.nan, np.nan], dtype=np.float64) + tm.assert_index_equal(result, expected) + + result = index.get_level_values(1) + expected = Index(["a", np.nan, 1], dtype=object) + tm.assert_index_equal(result, expected) + + +def test_get_level_values_int_with_na(): + # GH#17924 + arrays = [["a", "b", "b"], [1, np.nan, 2]] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(1) + expected = Index([1, np.nan, 2]) + tm.assert_index_equal(result, expected) + + arrays = [["a", "b", "b"], [np.nan, np.nan, 2]] + index = MultiIndex.from_arrays(arrays) + result = 
index.get_level_values(1) + expected = Index([np.nan, np.nan, 2]) + tm.assert_index_equal(result, expected) + + +def test_get_level_values_na(): + arrays = [[np.nan, np.nan, np.nan], ["a", np.nan, 1]] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(0) + expected = Index([np.nan, np.nan, np.nan]) + tm.assert_index_equal(result, expected) + + result = index.get_level_values(1) + expected = Index(["a", np.nan, 1]) + tm.assert_index_equal(result, expected) + + arrays = [["a", "b", "b"], pd.DatetimeIndex([0, 1, pd.NaT])] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(1) + expected = pd.DatetimeIndex([0, 1, pd.NaT]) + tm.assert_index_equal(result, expected) + + arrays = [[], []] + index = MultiIndex.from_arrays(arrays) + result = index.get_level_values(0) + expected = Index([], dtype=object) + tm.assert_index_equal(result, expected) + + +def test_get_level_values_when_periods(): + # GH33131. See also discussion in GH32669. + # This test can probably be removed when PeriodIndex._engine is removed. + from pandas import ( + Period, + PeriodIndex, + ) + + idx = MultiIndex.from_arrays( + [PeriodIndex([Period("2019Q1"), Period("2019Q2")], name="b")] + ) + idx2 = MultiIndex.from_arrays( + [idx._get_level_values(level) for level in range(idx.nlevels)] + ) + assert all(x.is_monotonic_increasing for x in idx2.levels) + + +def test_values_loses_freq_of_underlying_index(): + # GH#49054 + idx = pd.DatetimeIndex(date_range("20200101", periods=3, freq="BME")) + expected = idx.copy(deep=True) + idx2 = Index([1, 2, 3]) + midx = MultiIndex(levels=[idx, idx2], codes=[[0, 1, 2], [0, 1, 2]]) + midx.values + assert idx.freq is not None + tm.assert_index_equal(idx, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_get_set.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_get_set.py new file mode 100644 index 0000000000000000000000000000000000000000..6eeaeb6711d03c981fb190964de2ec5501380378 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_get_set.py @@ -0,0 +1,384 @@ +import numpy as np +import pytest + +from pandas.compat import PY311 + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas import ( + CategoricalIndex, + MultiIndex, +) +import pandas._testing as tm + + +def assert_matching(actual, expected, check_dtype=False): + # avoid specifying internal representation + # as much as possible + assert len(actual) == len(expected) + for act, exp in zip(actual, expected): + act = np.asarray(act) + exp = np.asarray(exp) + tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype) + + +def test_get_level_number_integer(idx): + idx.names = [1, 0] + assert idx._get_level_number(1) == 0 + assert idx._get_level_number(0) == 1 + msg = "Too many levels: Index has only 2 levels, not 3" + with pytest.raises(IndexError, match=msg): + idx._get_level_number(2) + with pytest.raises(KeyError, match="Level fourth not found"): + idx._get_level_number("fourth") + + +def test_get_dtypes(using_infer_string): + # Test MultiIndex.dtypes (# Gh37062) + idx_multitype = MultiIndex.from_product( + [[1, 2, 3], ["a", "b", "c"], pd.date_range("20200101", periods=2, tz="UTC")], + names=["int", "string", "dt"], + ) + + exp = "object" if not using_infer_string else "string" + expected = pd.Series( + { + "int": np.dtype("int64"), + "string": exp, + "dt": DatetimeTZDtype(tz="utc"), + } + ) + tm.assert_series_equal(expected, 
idx_multitype.dtypes) + + +def test_get_dtypes_no_level_name(using_infer_string): + # Test MultiIndex.dtypes (# GH38580 ) + idx_multitype = MultiIndex.from_product( + [ + [1, 2, 3], + ["a", "b", "c"], + pd.date_range("20200101", periods=2, tz="UTC"), + ], + ) + exp = "object" if not using_infer_string else "string" + expected = pd.Series( + { + "level_0": np.dtype("int64"), + "level_1": exp, + "level_2": DatetimeTZDtype(tz="utc"), + } + ) + tm.assert_series_equal(expected, idx_multitype.dtypes) + + +def test_get_dtypes_duplicate_level_names(using_infer_string): + # Test MultiIndex.dtypes with non-unique level names (# GH45174) + result = MultiIndex.from_product( + [ + [1, 2, 3], + ["a", "b", "c"], + pd.date_range("20200101", periods=2, tz="UTC"), + ], + names=["A", "A", "A"], + ).dtypes + exp = "object" if not using_infer_string else "string" + expected = pd.Series( + [np.dtype("int64"), exp, DatetimeTZDtype(tz="utc")], + index=["A", "A", "A"], + ) + tm.assert_series_equal(result, expected) + + +def test_get_level_number_out_of_bounds(multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + + with pytest.raises(IndexError, match="Too many levels"): + frame.index._get_level_number(2) + with pytest.raises(IndexError, match="not a valid level number"): + frame.index._get_level_number(-3) + + +def test_set_name_methods(idx): + # so long as these are synonyms, we don't need to test set_names + index_names = ["first", "second"] + assert idx.rename == idx.set_names + new_names = [name + "SUFFIX" for name in index_names] + ind = idx.set_names(new_names) + assert idx.names == index_names + assert ind.names == new_names + msg = "Length of names must match number of levels in MultiIndex" + with pytest.raises(ValueError, match=msg): + ind.set_names(new_names + new_names) + new_names2 = [name + "SUFFIX2" for name in new_names] + res = ind.set_names(new_names2, inplace=True) + assert res is None + assert ind.names == new_names2 + + # set names for specific level (# GH7792) + ind = idx.set_names(new_names[0], level=0) + assert idx.names == index_names + assert ind.names == [new_names[0], index_names[1]] + + res = ind.set_names(new_names2[0], level=0, inplace=True) + assert res is None + assert ind.names == [new_names2[0], index_names[1]] + + # set names for multiple levels + ind = idx.set_names(new_names, level=[0, 1]) + assert idx.names == index_names + assert ind.names == new_names + + res = ind.set_names(new_names2, level=[0, 1], inplace=True) + assert res is None + assert ind.names == new_names2 + + +def test_set_levels_codes_directly(idx): + # setting levels/codes directly raises AttributeError + + levels = idx.levels + new_levels = [[lev + "a" for lev in level] for level in levels] + + codes = idx.codes + major_codes, minor_codes = codes + major_codes = [(x + 1) % 3 for x in major_codes] + minor_codes = [(x + 1) % 1 for x in minor_codes] + new_codes = [major_codes, minor_codes] + + msg = "Can't set attribute" + with pytest.raises(AttributeError, match=msg): + idx.levels = new_levels + + msg = ( + "property 'codes' of 'MultiIndex' object has no setter" + if PY311 + else "can't set attribute" + ) + with pytest.raises(AttributeError, match=msg): + idx.codes = new_codes + + +def test_set_levels(idx): + # side note - you probably wouldn't want to use levels and codes + # directly like this - but it is possible. 
+ levels = idx.levels + new_levels = [[lev + "a" for lev in level] for level in levels] + + # level changing [w/o mutation] + ind2 = idx.set_levels(new_levels) + assert_matching(ind2.levels, new_levels) + assert_matching(idx.levels, levels) + + # level changing specific level [w/o mutation] + ind2 = idx.set_levels(new_levels[0], level=0) + assert_matching(ind2.levels, [new_levels[0], levels[1]]) + assert_matching(idx.levels, levels) + + ind2 = idx.set_levels(new_levels[1], level=1) + assert_matching(ind2.levels, [levels[0], new_levels[1]]) + assert_matching(idx.levels, levels) + + # level changing multiple levels [w/o mutation] + ind2 = idx.set_levels(new_levels, level=[0, 1]) + assert_matching(ind2.levels, new_levels) + assert_matching(idx.levels, levels) + + # illegal level changing should not change levels + # GH 13754 + original_index = idx.copy() + with pytest.raises(ValueError, match="^On"): + idx.set_levels(["c"], level=0) + assert_matching(idx.levels, original_index.levels, check_dtype=True) + + with pytest.raises(ValueError, match="^On"): + idx.set_codes([0, 1, 2, 3, 4, 5], level=0) + assert_matching(idx.codes, original_index.codes, check_dtype=True) + + with pytest.raises(TypeError, match="^Levels"): + idx.set_levels("c", level=0) + assert_matching(idx.levels, original_index.levels, check_dtype=True) + + with pytest.raises(TypeError, match="^Codes"): + idx.set_codes(1, level=0) + assert_matching(idx.codes, original_index.codes, check_dtype=True) + + +def test_set_codes(idx): + # side note - you probably wouldn't want to use levels and codes + # directly like this - but it is possible. + codes = idx.codes + major_codes, minor_codes = codes + major_codes = [(x + 1) % 3 for x in major_codes] + minor_codes = [(x + 1) % 1 for x in minor_codes] + new_codes = [major_codes, minor_codes] + + # changing codes w/o mutation + ind2 = idx.set_codes(new_codes) + assert_matching(ind2.codes, new_codes) + assert_matching(idx.codes, codes) + + # codes changing specific level w/o mutation + ind2 = idx.set_codes(new_codes[0], level=0) + assert_matching(ind2.codes, [new_codes[0], codes[1]]) + assert_matching(idx.codes, codes) + + ind2 = idx.set_codes(new_codes[1], level=1) + assert_matching(ind2.codes, [codes[0], new_codes[1]]) + assert_matching(idx.codes, codes) + + # codes changing multiple levels w/o mutation + ind2 = idx.set_codes(new_codes, level=[0, 1]) + assert_matching(ind2.codes, new_codes) + assert_matching(idx.codes, codes) + + # label changing for levels of different magnitude of categories + ind = MultiIndex.from_tuples([(0, i) for i in range(130)]) + new_codes = range(129, -1, -1) + expected = MultiIndex.from_tuples([(0, i) for i in new_codes]) + + # [w/o mutation] + result = ind.set_codes(codes=new_codes, level=1) + assert result.equals(expected) + + +def test_set_levels_codes_names_bad_input(idx): + levels, codes = idx.levels, idx.codes + names = idx.names + + with pytest.raises(ValueError, match="Length of levels"): + idx.set_levels([levels[0]]) + + with pytest.raises(ValueError, match="Length of codes"): + idx.set_codes([codes[0]]) + + with pytest.raises(ValueError, match="Length of names"): + idx.set_names([names[0]]) + + # shouldn't scalar data error, instead should demand list-like + with pytest.raises(TypeError, match="list of lists-like"): + idx.set_levels(levels[0]) + + # shouldn't scalar data error, instead should demand list-like + with pytest.raises(TypeError, match="list of lists-like"): + idx.set_codes(codes[0]) + + # shouldn't scalar data error, instead should demand 
list-like + with pytest.raises(TypeError, match="list-like"): + idx.set_names(names[0]) + + # should have equal lengths + with pytest.raises(TypeError, match="list of lists-like"): + idx.set_levels(levels[0], level=[0, 1]) + + with pytest.raises(TypeError, match="list-like"): + idx.set_levels(levels, level=0) + + # should have equal lengths + with pytest.raises(TypeError, match="list of lists-like"): + idx.set_codes(codes[0], level=[0, 1]) + + with pytest.raises(TypeError, match="list-like"): + idx.set_codes(codes, level=0) + + # should have equal lengths + with pytest.raises(ValueError, match="Length of names"): + idx.set_names(names[0], level=[0, 1]) + + with pytest.raises(TypeError, match="Names must be a"): + idx.set_names(names, level=0) + + +@pytest.mark.parametrize("inplace", [True, False]) +def test_set_names_with_nlevel_1(inplace): + # GH 21149 + # Ensure that .set_names for MultiIndex with + # nlevels == 1 does not raise any errors + expected = MultiIndex(levels=[[0, 1]], codes=[[0, 1]], names=["first"]) + m = MultiIndex.from_product([[0, 1]]) + result = m.set_names("first", level=0, inplace=inplace) + + if inplace: + result = m + + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("ordered", [True, False]) +def test_set_levels_categorical(ordered): + # GH13854 + index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]]) + + cidx = CategoricalIndex(list("bac"), ordered=ordered) + result = index.set_levels(cidx, level=0) + expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]], codes=index.codes) + tm.assert_index_equal(result, expected) + + result_lvl = result.get_level_values(0) + expected_lvl = CategoricalIndex( + list("bacb"), categories=cidx.categories, ordered=cidx.ordered + ) + tm.assert_index_equal(result_lvl, expected_lvl) + + +def test_set_value_keeps_names(): + # motivating example from #3742 + lev1 = ["hans", "hans", "hans", "grethe", "grethe", "grethe"] + lev2 = ["1", "2", "3"] * 2 + idx = MultiIndex.from_arrays([lev1, lev2], names=["Name", "Number"]) + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + columns=["one", "two", "three", "four"], + index=idx, + ) + df = df.sort_index() + assert df._is_copy is None + assert df.index.names == ("Name", "Number") + df.at[("grethe", "4"), "one"] = 99.34 + assert df._is_copy is None + assert df.index.names == ("Name", "Number") + + +def test_set_levels_with_iterable(): + # GH23273 + sizes = [1, 2, 3] + colors = ["black"] * 3 + index = MultiIndex.from_arrays([sizes, colors], names=["size", "color"]) + + result = index.set_levels(map(int, ["3", "2", "1"]), level="size") + + expected_sizes = [3, 2, 1] + expected = MultiIndex.from_arrays([expected_sizes, colors], names=["size", "color"]) + tm.assert_index_equal(result, expected) + + +def test_set_empty_level(): + # GH#48636 + midx = MultiIndex.from_arrays([[]], names=["A"]) + result = midx.set_levels(pd.DatetimeIndex([]), level=0) + expected = MultiIndex.from_arrays([pd.DatetimeIndex([])], names=["A"]) + tm.assert_index_equal(result, expected) + + +def test_set_levels_pos_args_removal(): + # https://github.com/pandas-dev/pandas/issues/41485 + idx = MultiIndex.from_tuples( + [ + (1, "one"), + (3, "one"), + ], + names=["foo", "bar"], + ) + with pytest.raises(TypeError, match="positional arguments"): + idx.set_levels(["a", "b", "c"], 0) + + with pytest.raises(TypeError, match="positional arguments"): + idx.set_codes([[0, 1], [1, 0]], 0) + + +def test_set_levels_categorical_keep_dtype(): + # GH#52125 + midx = MultiIndex.from_arrays([[5, 6]]) + 
result = midx.set_levels(levels=pd.Categorical([1, 2]), level=0) + expected = MultiIndex.from_arrays([pd.Categorical([1, 2])]) + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_integrity.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_integrity.py new file mode 100644 index 0000000000000000000000000000000000000000..d956747cbc859f40b69e52ea78c85ebce31f3427 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_integrity.py @@ -0,0 +1,289 @@ +import re + +import numpy as np +import pytest + +from pandas._libs import index as libindex + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike + +import pandas as pd +from pandas import ( + Index, + IntervalIndex, + MultiIndex, + RangeIndex, +) +import pandas._testing as tm + + +def test_labels_dtypes(): + # GH 8456 + i = MultiIndex.from_tuples([("A", 1), ("A", 2)]) + assert i.codes[0].dtype == "int8" + assert i.codes[1].dtype == "int8" + + i = MultiIndex.from_product([["a"], range(40)]) + assert i.codes[1].dtype == "int8" + i = MultiIndex.from_product([["a"], range(400)]) + assert i.codes[1].dtype == "int16" + i = MultiIndex.from_product([["a"], range(40000)]) + assert i.codes[1].dtype == "int32" + + i = MultiIndex.from_product([["a"], range(1000)]) + assert (i.codes[0] >= 0).all() + assert (i.codes[1] >= 0).all() + + +def test_values_boxed(): + tuples = [ + (1, pd.Timestamp("2000-01-01")), + (2, pd.NaT), + (3, pd.Timestamp("2000-01-03")), + (1, pd.Timestamp("2000-01-04")), + (2, pd.Timestamp("2000-01-02")), + (3, pd.Timestamp("2000-01-03")), + ] + result = MultiIndex.from_tuples(tuples) + expected = construct_1d_object_array_from_listlike(tuples) + tm.assert_numpy_array_equal(result.values, expected) + # Check that code branches for boxed values produce identical results + tm.assert_numpy_array_equal(result.values[:4], result[:4].values) + + +def test_values_multiindex_datetimeindex(): + # Test to ensure we hit the boxing / nobox part of MI.values + ints = np.arange(10**18, 10**18 + 5) + naive = pd.DatetimeIndex(ints) + + aware = pd.DatetimeIndex(ints, tz="US/Central") + + idx = MultiIndex.from_arrays([naive, aware]) + result = idx.values + + outer = pd.DatetimeIndex([x[0] for x in result]) + tm.assert_index_equal(outer, naive) + + inner = pd.DatetimeIndex([x[1] for x in result]) + tm.assert_index_equal(inner, aware) + + # n_lev > n_lab + result = idx[:2].values + + outer = pd.DatetimeIndex([x[0] for x in result]) + tm.assert_index_equal(outer, naive[:2]) + + inner = pd.DatetimeIndex([x[1] for x in result]) + tm.assert_index_equal(inner, aware[:2]) + + +def test_values_multiindex_periodindex(): + # Test to ensure we hit the boxing / nobox part of MI.values + ints = np.arange(2007, 2012) + pidx = pd.PeriodIndex(ints, freq="D") + + idx = MultiIndex.from_arrays([ints, pidx]) + result = idx.values + + outer = Index([x[0] for x in result]) + tm.assert_index_equal(outer, Index(ints, dtype=np.int64)) + + inner = pd.PeriodIndex([x[1] for x in result]) + tm.assert_index_equal(inner, pidx) + + # n_lev > n_lab + result = idx[:2].values + + outer = Index([x[0] for x in result]) + tm.assert_index_equal(outer, Index(ints[:2], dtype=np.int64)) + + inner = pd.PeriodIndex([x[1] for x in result]) + tm.assert_index_equal(inner, pidx[:2]) + + +def test_consistency(): + # need to construct an overflow + major_axis = list(range(70000)) + minor_axis = list(range(10)) + + major_codes = np.arange(70000) 
+ minor_codes = np.repeat(range(10), 7000) + + # the fact that is works means it's consistent + index = MultiIndex( + levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] + ) + + # inconsistent + major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3]) + minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1]) + index = MultiIndex( + levels=[major_axis, minor_axis], codes=[major_codes, minor_codes] + ) + + assert index.is_unique is False + + +@pytest.mark.slow +def test_hash_collisions(monkeypatch): + # non-smoke test that we don't get hash collisions + size_cutoff = 50 + with monkeypatch.context() as m: + m.setattr(libindex, "_SIZE_CUTOFF", size_cutoff) + index = MultiIndex.from_product( + [np.arange(8), np.arange(8)], names=["one", "two"] + ) + result = index.get_indexer(index.values) + tm.assert_numpy_array_equal(result, np.arange(len(index), dtype="intp")) + + for i in [0, 1, len(index) - 2, len(index) - 1]: + result = index.get_loc(index[i]) + assert result == i + + +def test_dims(): + pass + + +def test_take_invalid_kwargs(): + vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]] + idx = MultiIndex.from_product(vals, names=["str", "dt"]) + indices = [1, 2] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + idx.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, mode="clip") + + +def test_isna_behavior(idx): + # should not segfault GH5123 + # NOTE: if MI representation changes, may make sense to allow + # isna(MI) + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + pd.isna(idx) + + +def test_large_multiindex_error(monkeypatch): + # GH12527 + size_cutoff = 50 + with monkeypatch.context() as m: + m.setattr(libindex, "_SIZE_CUTOFF", size_cutoff) + df_below_cutoff = pd.DataFrame( + 1, + index=MultiIndex.from_product([[1, 2], range(size_cutoff - 1)]), + columns=["dest"], + ) + with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): + df_below_cutoff.loc[(-1, 0), "dest"] + with pytest.raises(KeyError, match=r"^\(3, 0\)$"): + df_below_cutoff.loc[(3, 0), "dest"] + df_above_cutoff = pd.DataFrame( + 1, + index=MultiIndex.from_product([[1, 2], range(size_cutoff + 1)]), + columns=["dest"], + ) + with pytest.raises(KeyError, match=r"^\(-1, 0\)$"): + df_above_cutoff.loc[(-1, 0), "dest"] + with pytest.raises(KeyError, match=r"^\(3, 0\)$"): + df_above_cutoff.loc[(3, 0), "dest"] + + +def test_mi_hashtable_populated_attribute_error(monkeypatch): + # GH 18165 + monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 50) + r = range(50) + df = pd.DataFrame({"a": r, "b": r}, index=MultiIndex.from_arrays([r, r])) + + msg = "'Series' object has no attribute 'foo'" + with pytest.raises(AttributeError, match=msg): + df["a"].foo() + + +def test_can_hold_identifiers(idx): + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is True + + +def test_metadata_immutable(idx): + levels, codes = idx.levels, idx.codes + # shouldn't be able to set at either the top level or base level + mutable_regex = re.compile("does not support mutable operations") + with pytest.raises(TypeError, match=mutable_regex): + levels[0] = levels[0] + with pytest.raises(TypeError, match=mutable_regex): + levels[0][0] = levels[0][0] + # ditto for labels + with pytest.raises(TypeError, 
match=mutable_regex): + codes[0] = codes[0] + with pytest.raises(ValueError, match="assignment destination is read-only"): + codes[0][0] = codes[0][0] + # and for names + names = idx.names + with pytest.raises(TypeError, match=mutable_regex): + names[0] = names[0] + + +def test_level_setting_resets_attributes(): + ind = MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]]) + assert ind.is_monotonic_increasing + ind = ind.set_levels([["A", "B"], [1, 3, 2]]) + # if this fails, probably didn't reset the cache correctly. + assert not ind.is_monotonic_increasing + + +def test_rangeindex_fallback_coercion_bug(): + # GH 12893 + df1 = pd.DataFrame(np.arange(100).reshape((10, 10))) + df2 = pd.DataFrame(np.arange(100).reshape((10, 10))) + df = pd.concat( + {"df1": df1.stack(future_stack=True), "df2": df2.stack(future_stack=True)}, + axis=1, + ) + df.index.names = ["fizz", "buzz"] + + expected = pd.DataFrame( + {"df2": np.arange(100), "df1": np.arange(100)}, + index=MultiIndex.from_product([range(10), range(10)], names=["fizz", "buzz"]), + ) + tm.assert_frame_equal(df, expected, check_like=True) + + result = df.index.get_level_values("fizz") + expected = Index(np.arange(10, dtype=np.int64), name="fizz").repeat(10) + tm.assert_index_equal(result, expected) + + result = df.index.get_level_values("buzz") + expected = Index(np.tile(np.arange(10, dtype=np.int64), 10), name="buzz") + tm.assert_index_equal(result, expected) + + +def test_memory_usage(idx): + result = idx.memory_usage() + if len(idx): + idx.get_loc(idx[0]) + result2 = idx.memory_usage() + result3 = idx.memory_usage(deep=True) + + # RangeIndex, IntervalIndex + # don't have engines + if not isinstance(idx, (RangeIndex, IntervalIndex)): + assert result2 > result + + if idx.inferred_type == "object": + assert result3 > result2 + + else: + # we report 0 for no-length + assert result == 0 + + +def test_nlevels(idx): + assert idx.nlevels == 2 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_isin.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_isin.py new file mode 100644 index 0000000000000000000000000000000000000000..68fdf25359f1bbada24f6a2403d5a04331bee84c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_isin.py @@ -0,0 +1,103 @@ +import numpy as np +import pytest + +from pandas import MultiIndex +import pandas._testing as tm + + +def test_isin_nan(): + idx = MultiIndex.from_arrays([["foo", "bar"], [1.0, np.nan]]) + tm.assert_numpy_array_equal(idx.isin([("bar", np.nan)]), np.array([False, True])) + tm.assert_numpy_array_equal( + idx.isin([("bar", float("nan"))]), np.array([False, True]) + ) + + +def test_isin_missing(nulls_fixture): + # GH48905 + mi1 = MultiIndex.from_tuples([(1, nulls_fixture)]) + mi2 = MultiIndex.from_tuples([(1, 1), (1, 2)]) + result = mi2.isin(mi1) + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + +def test_isin(): + values = [("foo", 2), ("bar", 3), ("quux", 4)] + + idx = MultiIndex.from_arrays([["qux", "baz", "foo", "bar"], np.arange(4)]) + result = idx.isin(values) + expected = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + # empty, return dtype bool + idx = MultiIndex.from_arrays([[], []]) + result = idx.isin(values) + assert len(result) == 0 + assert result.dtype == np.bool_ + + +def test_isin_level_kwarg(): + idx = MultiIndex.from_arrays([["qux", "baz", "foo", "bar"], np.arange(4)]) + + vals_0 = ["foo", "bar", 
"quux"] + vals_1 = [2, 3, 10] + + expected = np.array([False, False, True, True]) + tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=0)) + tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level=-2)) + + tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=1)) + tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level=-1)) + + msg = "Too many levels: Index has only 2 levels, not 6" + with pytest.raises(IndexError, match=msg): + idx.isin(vals_0, level=5) + msg = "Too many levels: Index has only 2 levels, -5 is not a valid level number" + with pytest.raises(IndexError, match=msg): + idx.isin(vals_0, level=-5) + + with pytest.raises(KeyError, match=r"'Level 1\.0 not found'"): + idx.isin(vals_0, level=1.0) + with pytest.raises(KeyError, match=r"'Level -1\.0 not found'"): + idx.isin(vals_1, level=-1.0) + with pytest.raises(KeyError, match="'Level A not found'"): + idx.isin(vals_1, level="A") + + idx.names = ["A", "B"] + tm.assert_numpy_array_equal(expected, idx.isin(vals_0, level="A")) + tm.assert_numpy_array_equal(expected, idx.isin(vals_1, level="B")) + + with pytest.raises(KeyError, match="'Level C not found'"): + idx.isin(vals_1, level="C") + + +@pytest.mark.parametrize( + "labels,expected,level", + [ + ([("b", np.nan)], np.array([False, False, True]), None), + ([np.nan, "a"], np.array([True, True, False]), 0), + (["d", np.nan], np.array([False, True, True]), 1), + ], +) +def test_isin_multi_index_with_missing_value(labels, expected, level): + # GH 19132 + midx = MultiIndex.from_arrays([[np.nan, "a", "b"], ["c", "d", np.nan]]) + result = midx.isin(labels, level=level) + tm.assert_numpy_array_equal(result, expected) + + +def test_isin_empty(): + # GH#51599 + midx = MultiIndex.from_arrays([[1, 2], [3, 4]]) + result = midx.isin([]) + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + +def test_isin_generator(): + # GH#52568 + midx = MultiIndex.from_tuples([(1, 2)]) + result = midx.isin(x for x in [(1, 2)]) + expected = np.array([True]) + tm.assert_numpy_array_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_join.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_join.py new file mode 100644 index 0000000000000000000000000000000000000000..edd0feaaa1159ff8340af772d27f2a7af09ceb87 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_join.py @@ -0,0 +1,268 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Interval, + MultiIndex, + Series, + StringDtype, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "other", [Index(["three", "one", "two"]), Index(["one"]), Index(["one", "three"])] +) +def test_join_level(idx, other, join_type): + join_index, lidx, ridx = other.join( + idx, how=join_type, level="second", return_indexers=True + ) + + exp_level = other.join(idx.levels[1], how=join_type) + assert join_index.levels[0].equals(idx.levels[0]) + assert join_index.levels[1].equals(exp_level) + + # pare down levels + mask = np.array([x[1] in exp_level for x in idx], dtype=bool) + exp_values = idx.values[mask] + tm.assert_numpy_array_equal(join_index.values, exp_values) + + if join_type in ("outer", "inner"): + join_index2, ridx2, lidx2 = idx.join( + other, how=join_type, level="second", return_indexers=True + ) + + assert join_index.equals(join_index2) + tm.assert_numpy_array_equal(lidx, lidx2) + tm.assert_numpy_array_equal(ridx, ridx2) + 
tm.assert_numpy_array_equal(join_index2.values, exp_values) + + +def test_join_level_corner_case(idx): + # some corner cases + index = Index(["three", "one", "two"]) + result = index.join(idx, level="second") + assert isinstance(result, MultiIndex) + + with pytest.raises(TypeError, match="Join.*MultiIndex.*ambiguous"): + idx.join(idx, level=1) + + +def test_join_self(idx, join_type): + result = idx.join(idx, how=join_type) + expected = idx + if join_type == "outer": + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + +def test_join_multi(): + # GH 10665 + midx = MultiIndex.from_product([np.arange(4), np.arange(4)], names=["a", "b"]) + idx = Index([1, 2, 5], name="b") + + # inner + jidx, lidx, ridx = midx.join(idx, how="inner", return_indexers=True) + exp_idx = MultiIndex.from_product([np.arange(4), [1, 2]], names=["a", "b"]) + exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp) + exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp) + tm.assert_index_equal(jidx, exp_idx) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) + # flip + jidx, ridx, lidx = idx.join(midx, how="inner", return_indexers=True) + tm.assert_index_equal(jidx, exp_idx) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) + + # keep MultiIndex + jidx, lidx, ridx = midx.join(idx, how="left", return_indexers=True) + exp_ridx = np.array( + [-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1], dtype=np.intp + ) + tm.assert_index_equal(jidx, midx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + # flip + jidx, ridx, lidx = idx.join(midx, how="right", return_indexers=True) + tm.assert_index_equal(jidx, midx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + + +def test_join_multi_wrong_order(): + # GH 25760 + # GH 28956 + + midx1 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) + midx2 = MultiIndex.from_product([[1, 2], [3, 4]], names=["b", "a"]) + + join_idx, lidx, ridx = midx1.join(midx2, return_indexers=True) + + exp_ridx = np.array([-1, -1, -1, -1], dtype=np.intp) + + tm.assert_index_equal(midx1, join_idx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + + +def test_join_multi_return_indexers(): + # GH 34074 + + midx1 = MultiIndex.from_product([[1, 2], [3, 4], [5, 6]], names=["a", "b", "c"]) + midx2 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) + + result = midx1.join(midx2, return_indexers=False) + tm.assert_index_equal(result, midx1) + + +def test_join_overlapping_interval_level(): + # GH 44096 + idx_1 = MultiIndex.from_tuples( + [ + (1, Interval(0.0, 1.0)), + (1, Interval(1.0, 2.0)), + (1, Interval(2.0, 5.0)), + (2, Interval(0.0, 1.0)), + (2, Interval(1.0, 3.0)), # interval limit is here at 3.0, not at 2.0 + (2, Interval(3.0, 5.0)), + ], + names=["num", "interval"], + ) + + idx_2 = MultiIndex.from_tuples( + [ + (1, Interval(2.0, 5.0)), + (1, Interval(0.0, 1.0)), + (1, Interval(1.0, 2.0)), + (2, Interval(3.0, 5.0)), + (2, Interval(0.0, 1.0)), + (2, Interval(1.0, 3.0)), + ], + names=["num", "interval"], + ) + + expected = MultiIndex.from_tuples( + [ + (1, Interval(0.0, 1.0)), + (1, Interval(1.0, 2.0)), + (1, Interval(2.0, 5.0)), + (2, Interval(0.0, 1.0)), + (2, Interval(1.0, 3.0)), + (2, Interval(3.0, 5.0)), + ], + names=["num", "interval"], + ) + result = idx_1.join(idx_2, how="outer") + + tm.assert_index_equal(result, expected) + + +def test_join_midx_ea(): + # GH#49277 + midx = 
MultiIndex.from_arrays( + [Series([1, 1, 3], dtype="Int64"), Series([1, 2, 3], dtype="Int64")], + names=["a", "b"], + ) + midx2 = MultiIndex.from_arrays( + [Series([1], dtype="Int64"), Series([3], dtype="Int64")], names=["a", "c"] + ) + result = midx.join(midx2, how="inner") + expected = MultiIndex.from_arrays( + [ + Series([1, 1], dtype="Int64"), + Series([1, 2], dtype="Int64"), + Series([3, 3], dtype="Int64"), + ], + names=["a", "b", "c"], + ) + tm.assert_index_equal(result, expected) + + +def test_join_midx_string(): + # GH#49277 + midx = MultiIndex.from_arrays( + [ + Series(["a", "a", "c"], dtype=StringDtype()), + Series(["a", "b", "c"], dtype=StringDtype()), + ], + names=["a", "b"], + ) + midx2 = MultiIndex.from_arrays( + [Series(["a"], dtype=StringDtype()), Series(["c"], dtype=StringDtype())], + names=["a", "c"], + ) + result = midx.join(midx2, how="inner") + expected = MultiIndex.from_arrays( + [ + Series(["a", "a"], dtype=StringDtype()), + Series(["a", "b"], dtype=StringDtype()), + Series(["c", "c"], dtype=StringDtype()), + ], + names=["a", "b", "c"], + ) + tm.assert_index_equal(result, expected) + + +def test_join_multi_with_nan(): + # GH29252 + df1 = DataFrame( + data={"col1": [1.1, 1.2]}, + index=MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"]), + ) + df2 = DataFrame( + data={"col2": [2.1, 2.2]}, + index=MultiIndex.from_product([["A"], [np.nan, 2.0]], names=["id1", "id2"]), + ) + result = df1.join(df2) + expected = DataFrame( + data={"col1": [1.1, 1.2], "col2": [np.nan, 2.2]}, + index=MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("val", [0, 5]) +def test_join_dtypes(any_numeric_ea_dtype, val): + # GH#49830 + midx = MultiIndex.from_arrays([Series([1, 2], dtype=any_numeric_ea_dtype), [3, 4]]) + midx2 = MultiIndex.from_arrays( + [Series([1, val, val], dtype=any_numeric_ea_dtype), [3, 4, 4]] + ) + result = midx.join(midx2, how="outer") + expected = MultiIndex.from_arrays( + [Series([val, val, 1, 2], dtype=any_numeric_ea_dtype), [4, 4, 3, 4]] + ).sort_values() + tm.assert_index_equal(result, expected) + + +def test_join_dtypes_all_nan(any_numeric_ea_dtype): + # GH#49830 + midx = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [np.nan, np.nan]] + ) + midx2 = MultiIndex.from_arrays( + [Series([1, 0, 0], dtype=any_numeric_ea_dtype), [np.nan, np.nan, np.nan]] + ) + result = midx.join(midx2, how="outer") + expected = MultiIndex.from_arrays( + [ + Series([0, 0, 1, 2], dtype=any_numeric_ea_dtype), + [np.nan, np.nan, np.nan, np.nan], + ] + ) + tm.assert_index_equal(result, expected) + + +def test_join_index_levels(): + # GH#53093 + midx = midx = MultiIndex.from_tuples([("a", "2019-02-01"), ("a", "2019-02-01")]) + midx2 = MultiIndex.from_tuples([("a", "2019-01-31")]) + result = midx.join(midx2, how="outer") + expected = MultiIndex.from_tuples( + [("a", "2019-01-31"), ("a", "2019-02-01"), ("a", "2019-02-01")] + ) + tm.assert_index_equal(result.levels[1], expected.levels[1]) + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_lexsort.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_lexsort.py new file mode 100644 index 0000000000000000000000000000000000000000..fc16a4197a3a4daf65de6f58d85d13883d535d41 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_lexsort.py @@ -0,0 +1,46 @@ +from pandas import MultiIndex + + 
+class TestIsLexsorted: + def test_is_lexsorted(self): + levels = [[0, 1], [0, 1, 2]] + + index = MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]] + ) + assert index._is_lexsorted() + + index = MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]] + ) + assert not index._is_lexsorted() + + index = MultiIndex( + levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]] + ) + assert not index._is_lexsorted() + assert index._lexsort_depth == 0 + + +class TestLexsortDepth: + def test_lexsort_depth(self): + # Test that lexsort_depth return the correct sortorder + # when it was given to the MultiIndex const. + # GH#28518 + + levels = [[0, 1], [0, 1, 2]] + + index = MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2 + ) + assert index._lexsort_depth == 2 + + index = MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=1 + ) + assert index._lexsort_depth == 1 + + index = MultiIndex( + levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=0 + ) + assert index._lexsort_depth == 0 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_missing.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_missing.py new file mode 100644 index 0000000000000000000000000000000000000000..14ffc42fb4b59074c3c830a83ff6bdc36bdf099e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_missing.py @@ -0,0 +1,111 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import MultiIndex +import pandas._testing as tm + + +def test_fillna(idx): + # GH 11343 + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.fillna(idx[0]) + + +def test_dropna(): + # GH 6194 + idx = MultiIndex.from_arrays( + [ + [1, np.nan, 3, np.nan, 5], + [1, 2, np.nan, np.nan, 5], + ["a", "b", "c", np.nan, "e"], + ] + ) + + exp = MultiIndex.from_arrays([[1, 5], [1, 5], ["a", "e"]]) + tm.assert_index_equal(idx.dropna(), exp) + tm.assert_index_equal(idx.dropna(how="any"), exp) + + exp = MultiIndex.from_arrays( + [[1, np.nan, 3, 5], [1, 2, np.nan, 5], ["a", "b", "c", "e"]] + ) + tm.assert_index_equal(idx.dropna(how="all"), exp) + + msg = "invalid how option: xxx" + with pytest.raises(ValueError, match=msg): + idx.dropna(how="xxx") + + # GH26408 + # test if missing values are dropped for multiindex constructed + # from codes and values + idx = MultiIndex( + levels=[[np.nan, None, pd.NaT, "128", 2], [np.nan, None, pd.NaT, "128", 2]], + codes=[[0, -1, 1, 2, 3, 4], [0, -1, 3, 3, 3, 4]], + ) + expected = MultiIndex.from_arrays([["128", 2], ["128", 2]]) + tm.assert_index_equal(idx.dropna(), expected) + tm.assert_index_equal(idx.dropna(how="any"), expected) + + expected = MultiIndex.from_arrays( + [[np.nan, np.nan, "128", 2], ["128", "128", "128", 2]] + ) + tm.assert_index_equal(idx.dropna(how="all"), expected) + + +def test_nulls(idx): + # this is really a smoke test for the methods + # as these are adequately tested for function elsewhere + + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + idx.isna() + + +@pytest.mark.xfail(reason="isna is not defined for MultiIndex") +def test_hasnans_isnans(idx): + # GH 11343, added tests for hasnans / isnans + index = idx.copy() + + # cases in indices doesn't include NaN + expected = np.array([False] * len(index), dtype=bool) + tm.assert_numpy_array_equal(index._isnan, 
expected) + assert index.hasnans is False + + index = idx.copy() + values = index.values + values[1] = np.nan + + index = type(idx)(values) + + expected = np.array([False] * len(index), dtype=bool) + expected[1] = True + tm.assert_numpy_array_equal(index._isnan, expected) + assert index.hasnans is True + + +def test_nan_stays_float(): + # GH 7031 + idx0 = MultiIndex(levels=[["A", "B"], []], codes=[[1, 0], [-1, -1]], names=[0, 1]) + idx1 = MultiIndex(levels=[["C"], ["D"]], codes=[[0], [0]], names=[0, 1]) + idxm = idx0.join(idx1, how="outer") + assert pd.isna(idx0.get_level_values(1)).all() + # the following failed in 0.14.1 + assert pd.isna(idxm.get_level_values(1)[:-1]).all() + + df0 = pd.DataFrame([[1, 2]], index=idx0) + df1 = pd.DataFrame([[3, 4]], index=idx1) + dfm = df0 - df1 + assert pd.isna(df0.index.get_level_values(1)).all() + # the following failed in 0.14.1 + assert pd.isna(dfm.index.get_level_values(1)[:-1]).all() + + +def test_tuples_have_na(): + index = MultiIndex( + levels=[[1, 0], [0, 1, 2, 3]], + codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]], + ) + + assert pd.isna(index[4][0]) + assert pd.isna(index.values[4][0]) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_names.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_names.py new file mode 100644 index 0000000000000000000000000000000000000000..45f19b4d70fb95cb2aee459a54d2ad53790b7df8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_names.py @@ -0,0 +1,201 @@ +import pytest + +import pandas as pd +from pandas import MultiIndex +import pandas._testing as tm + + +def check_level_names(index, names): + assert [level.name for level in index.levels] == list(names) + + +def test_slice_keep_name(): + x = MultiIndex.from_tuples([("a", "b"), (1, 2), ("c", "d")], names=["x", "y"]) + assert x[1:].names == x.names + + +def test_index_name_retained(): + # GH9857 + result = pd.DataFrame({"x": [1, 2, 6], "y": [2, 2, 8], "z": [-5, 0, 5]}) + result = result.set_index("z") + result.loc[10] = [9, 10] + df_expected = pd.DataFrame( + {"x": [1, 2, 6, 9], "y": [2, 2, 8, 10], "z": [-5, 0, 5, 10]} + ) + df_expected = df_expected.set_index("z") + tm.assert_frame_equal(result, df_expected) + + +def test_changing_names(idx): + assert [level.name for level in idx.levels] == ["first", "second"] + + view = idx.view() + copy = idx.copy() + shallow_copy = idx._view() + + # changing names should not change level names on object + new_names = [name + "a" for name in idx.names] + idx.names = new_names + check_level_names(idx, ["firsta", "seconda"]) + + # and not on copies + check_level_names(view, ["first", "second"]) + check_level_names(copy, ["first", "second"]) + check_level_names(shallow_copy, ["first", "second"]) + + # and copies shouldn't change original + shallow_copy.names = [name + "c" for name in shallow_copy.names] + check_level_names(idx, ["firsta", "seconda"]) + + +def test_take_preserve_name(idx): + taken = idx.take([3, 0, 1]) + assert taken.names == idx.names + + +def test_copy_names(): + # Check that adding a "names" parameter to the copy is honored + # GH14302 + multi_idx = MultiIndex.from_tuples([(1, 2), (3, 4)], names=["MyName1", "MyName2"]) + multi_idx1 = multi_idx.copy() + + assert multi_idx.equals(multi_idx1) + assert multi_idx.names == ["MyName1", "MyName2"] + assert multi_idx1.names == ["MyName1", "MyName2"] + + multi_idx2 = multi_idx.copy(names=["NewName1", "NewName2"]) + + assert multi_idx.equals(multi_idx2) + 
assert multi_idx.names == ["MyName1", "MyName2"] + assert multi_idx2.names == ["NewName1", "NewName2"] + + multi_idx3 = multi_idx.copy(name=["NewName1", "NewName2"]) + + assert multi_idx.equals(multi_idx3) + assert multi_idx.names == ["MyName1", "MyName2"] + assert multi_idx3.names == ["NewName1", "NewName2"] + + # gh-35592 + with pytest.raises(ValueError, match="Length of new names must be 2, got 1"): + multi_idx.copy(names=["mario"]) + + with pytest.raises(TypeError, match="MultiIndex.name must be a hashable type"): + multi_idx.copy(names=[["mario"], ["luigi"]]) + + +def test_names(idx): + # names are assigned in setup + assert idx.names == ["first", "second"] + level_names = [level.name for level in idx.levels] + assert level_names == idx.names + + # setting bad names on existing + index = idx + with pytest.raises(ValueError, match="^Length of names"): + setattr(index, "names", list(index.names) + ["third"]) + with pytest.raises(ValueError, match="^Length of names"): + setattr(index, "names", []) + + # initializing with bad names (should always be equivalent) + major_axis, minor_axis = idx.levels + major_codes, minor_codes = idx.codes + with pytest.raises(ValueError, match="^Length of names"): + MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=["first"], + ) + with pytest.raises(ValueError, match="^Length of names"): + MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=["first", "second", "third"], + ) + + # names are assigned on index, but not transferred to the levels + index.names = ["a", "b"] + level_names = [level.name for level in index.levels] + assert level_names == ["a", "b"] + + +def test_duplicate_level_names_access_raises(idx): + # GH19029 + idx.names = ["foo", "foo"] + with pytest.raises(ValueError, match="name foo occurs multiple times"): + idx._get_level_number("foo") + + +def test_get_names_from_levels(): + idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) + + assert idx.levels[0].name == "a" + assert idx.levels[1].name == "b" + + +def test_setting_names_from_levels_raises(): + idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) + with pytest.raises(RuntimeError, match="set_names"): + idx.levels[0].name = "foo" + + with pytest.raises(RuntimeError, match="set_names"): + idx.levels[1].name = "foo" + + new = pd.Series(1, index=idx.levels[0]) + with pytest.raises(RuntimeError, match="set_names"): + new.index.name = "bar" + + assert pd.Index._no_setting_name is False + assert pd.RangeIndex._no_setting_name is False + + +@pytest.mark.parametrize("func", ["rename", "set_names"]) +@pytest.mark.parametrize( + "rename_dict, exp_names", + [ + ({"x": "z"}, ["z", "y", "z"]), + ({"x": "z", "y": "x"}, ["z", "x", "z"]), + ({"y": "z"}, ["x", "z", "x"]), + ({}, ["x", "y", "x"]), + ({"z": "a"}, ["x", "y", "x"]), + ({"y": "z", "a": "b"}, ["x", "z", "x"]), + ], +) +def test_name_mi_with_dict_like_duplicate_names(func, rename_dict, exp_names): + # GH#20421 + mi = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=["x", "y", "x"]) + result = getattr(mi, func)(rename_dict) + expected = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=exp_names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("func", ["rename", "set_names"]) +@pytest.mark.parametrize( + "rename_dict, exp_names", + [ + ({"x": "z"}, ["z", "y"]), + ({"x": "z", "y": "x"}, ["z", "x"]), + ({"a": "z"}, ["x", "y"]), + ({}, ["x", "y"]), + ], +) +def test_name_mi_with_dict_like(func, rename_dict, 
exp_names): + # GH#20421 + mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"]) + result = getattr(mi, func)(rename_dict) + expected = MultiIndex.from_arrays([[1, 2], [3, 4]], names=exp_names) + tm.assert_index_equal(result, expected) + + +def test_index_name_with_dict_like_raising(): + # GH#20421 + ix = pd.Index([1, 2]) + msg = "Can only pass dict-like as `names` for MultiIndex." + with pytest.raises(TypeError, match=msg): + ix.set_names({"x": "z"}) + + +def test_multiindex_name_and_level_raising(): + # GH#20421 + mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"]) + with pytest.raises(TypeError, match="Can not pass level for dictlike `names`."): + mi.set_names(names={"x": "z"}, level={"x": "z"}) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_partial_indexing.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_partial_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..64cc1fa621b3195727cbfb3e62a8b6a6acf4dfaf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_partial_indexing.py @@ -0,0 +1,148 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + IndexSlice, + MultiIndex, + date_range, +) +import pandas._testing as tm + + +@pytest.fixture +def df(): + # c1 + # 2016-01-01 00:00:00 a 0 + # b 1 + # c 2 + # 2016-01-01 12:00:00 a 3 + # b 4 + # c 5 + # 2016-01-02 00:00:00 a 6 + # b 7 + # c 8 + # 2016-01-02 12:00:00 a 9 + # b 10 + # c 11 + # 2016-01-03 00:00:00 a 12 + # b 13 + # c 14 + dr = date_range("2016-01-01", "2016-01-03", freq="12h") + abc = ["a", "b", "c"] + mi = MultiIndex.from_product([dr, abc]) + frame = DataFrame({"c1": range(15)}, index=mi) + return frame + + +def test_partial_string_matching_single_index(df): + # partial string matching on a single index + for df_swap in [df.swaplevel(), df.swaplevel(0), df.swaplevel(0, 1)]: + df_swap = df_swap.sort_index() + just_a = df_swap.loc["a"] + result = just_a.loc["2016-01-01"] + expected = df.loc[IndexSlice[:, "a"], :].iloc[0:2] + expected.index = expected.index.droplevel(1) + tm.assert_frame_equal(result, expected) + + +def test_get_loc_partial_timestamp_multiindex(df): + mi = df.index + key = ("2016-01-01", "a") + loc = mi.get_loc(key) + + expected = np.zeros(len(mi), dtype=bool) + expected[[0, 3]] = True + tm.assert_numpy_array_equal(loc, expected) + + key2 = ("2016-01-02", "a") + loc2 = mi.get_loc(key2) + expected2 = np.zeros(len(mi), dtype=bool) + expected2[[6, 9]] = True + tm.assert_numpy_array_equal(loc2, expected2) + + key3 = ("2016-01", "a") + loc3 = mi.get_loc(key3) + expected3 = np.zeros(len(mi), dtype=bool) + expected3[mi.get_level_values(1).get_loc("a")] = True + tm.assert_numpy_array_equal(loc3, expected3) + + key4 = ("2016", "a") + loc4 = mi.get_loc(key4) + expected4 = expected3 + tm.assert_numpy_array_equal(loc4, expected4) + + # non-monotonic + taker = np.arange(len(mi), dtype=np.intp) + taker[::2] = taker[::-2] + mi2 = mi.take(taker) + loc5 = mi2.get_loc(key) + expected5 = np.zeros(len(mi2), dtype=bool) + expected5[[3, 14]] = True + tm.assert_numpy_array_equal(loc5, expected5) + + +def test_partial_string_timestamp_multiindex(df): + # GH10331 + df_swap = df.swaplevel(0, 1).sort_index() + SLC = IndexSlice + + # indexing with IndexSlice + result = df.loc[SLC["2016-01-01":"2016-02-01", :], :] + expected = df + tm.assert_frame_equal(result, expected) + + # match on secondary index + result = df_swap.loc[SLC[:, "2016-01-01":"2016-01-01"], :] + 
expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]] + tm.assert_frame_equal(result, expected) + + # partial string match on year only + result = df.loc["2016"] + expected = df + tm.assert_frame_equal(result, expected) + + # partial string match on date + result = df.loc["2016-01-01"] + expected = df.iloc[0:6] + tm.assert_frame_equal(result, expected) + + # partial string match on date and hour, from middle + result = df.loc["2016-01-02 12"] + # hourly resolution, same as index.levels[0], so we are _not_ slicing on + # that level, so that level gets dropped + expected = df.iloc[9:12].droplevel(0) + tm.assert_frame_equal(result, expected) + + # partial string match on secondary index + result = df_swap.loc[SLC[:, "2016-01-02"], :] + expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]] + tm.assert_frame_equal(result, expected) + + # tuple selector with partial string match on date + # "2016-01-01" has daily resolution, so _is_ a slice on the first level. + result = df.loc[("2016-01-01", "a"), :] + expected = df.iloc[[0, 3]] + expected = df.iloc[[0, 3]].droplevel(1) + tm.assert_frame_equal(result, expected) + + # Slicing date on first level should break (of course) bc the DTI is the + # second level on df_swap + with pytest.raises(KeyError, match="'2016-01-01'"): + df_swap.loc["2016-01-01"] + + +def test_partial_string_timestamp_multiindex_str_key_raises(df): + # Even though this syntax works on a single index, this is somewhat + # ambiguous and we don't want to extend this behavior forward to work + # in multi-indexes. This would amount to selecting a scalar from a + # column. + with pytest.raises(KeyError, match="'2016-01-01'"): + df["2016-01-01"] + + +def test_partial_string_timestamp_multiindex_daily_resolution(df): + # GH12685 (partial string with daily resolution or below) + result = df.loc[IndexSlice["2013-03":"2013-03", :], :] + expected = df.iloc[118:180] + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_pickle.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..1d8b72140442159fa0b8c608022d167bddd95db4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_pickle.py @@ -0,0 +1,10 @@ +import pytest + +from pandas import MultiIndex + + +def test_pickle_compat_construction(): + # this is testing for pickle compat + # need an object to create with + with pytest.raises(TypeError, match="Must pass both levels and codes"): + MultiIndex() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_reindex.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_reindex.py new file mode 100644 index 0000000000000000000000000000000000000000..d1b4fe8b98760a0b776c5d81d471a7745e8407de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_reindex.py @@ -0,0 +1,174 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + MultiIndex, +) +import pandas._testing as tm + + +def test_reindex(idx): + result, indexer = idx.reindex(list(idx[:4])) + assert isinstance(result, MultiIndex) + assert result.names == ["first", "second"] + assert [level.name for level in result.levels] == ["first", "second"] + + result, indexer = idx.reindex(list(idx)) + assert isinstance(result, MultiIndex) + assert indexer is None + assert result.names == ["first", "second"] + assert [level.name for level 
in result.levels] == ["first", "second"] + + +def test_reindex_level(idx): + index = Index(["one"]) + + target, indexer = idx.reindex(index, level="second") + target2, indexer2 = index.reindex(idx, level="second") + + exp_index = idx.join(index, level="second", how="right") + exp_index2 = idx.join(index, level="second", how="left") + + assert target.equals(exp_index) + exp_indexer = np.array([0, 2, 4]) + tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False) + + assert target2.equals(exp_index2) + exp_indexer2 = np.array([0, -1, 0, -1, 0, -1]) + tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False) + + with pytest.raises(TypeError, match="Fill method not supported"): + idx.reindex(idx, method="pad", level="second") + + +def test_reindex_preserves_names_when_target_is_list_or_ndarray(idx): + # GH6552 + idx = idx.copy() + target = idx.copy() + idx.names = target.names = [None, None] + + other_dtype = MultiIndex.from_product([[1, 2], [3, 4]]) + + # list & ndarray cases + assert idx.reindex([])[0].names == [None, None] + assert idx.reindex(np.array([]))[0].names == [None, None] + assert idx.reindex(target.tolist())[0].names == [None, None] + assert idx.reindex(target.values)[0].names == [None, None] + assert idx.reindex(other_dtype.tolist())[0].names == [None, None] + assert idx.reindex(other_dtype.values)[0].names == [None, None] + + idx.names = ["foo", "bar"] + assert idx.reindex([])[0].names == ["foo", "bar"] + assert idx.reindex(np.array([]))[0].names == ["foo", "bar"] + assert idx.reindex(target.tolist())[0].names == ["foo", "bar"] + assert idx.reindex(target.values)[0].names == ["foo", "bar"] + assert idx.reindex(other_dtype.tolist())[0].names == ["foo", "bar"] + assert idx.reindex(other_dtype.values)[0].names == ["foo", "bar"] + + +def test_reindex_lvl_preserves_names_when_target_is_list_or_array(): + # GH7774 + idx = MultiIndex.from_product([[0, 1], ["a", "b"]], names=["foo", "bar"]) + assert idx.reindex([], level=0)[0].names == ["foo", "bar"] + assert idx.reindex([], level=1)[0].names == ["foo", "bar"] + + +def test_reindex_lvl_preserves_type_if_target_is_empty_list_or_array( + using_infer_string, +): + # GH7774 + idx = MultiIndex.from_product([[0, 1], ["a", "b"]]) + assert idx.reindex([], level=0)[0].levels[0].dtype.type == np.int64 + exp = np.object_ if not using_infer_string else str + assert idx.reindex([], level=1)[0].levels[1].dtype.type == exp + + # case with EA levels + cat = pd.Categorical(["foo", "bar"]) + dti = pd.date_range("2016-01-01", periods=2, tz="US/Pacific") + mi = MultiIndex.from_product([cat, dti]) + assert mi.reindex([], level=0)[0].levels[0].dtype == cat.dtype + assert mi.reindex([], level=1)[0].levels[1].dtype == dti.dtype + + +def test_reindex_base(idx): + expected = np.arange(idx.size, dtype=np.intp) + + actual = idx.get_indexer(idx) + tm.assert_numpy_array_equal(expected, actual) + + with pytest.raises(ValueError, match="Invalid fill method"): + idx.get_indexer(idx, method="invalid") + + +def test_reindex_non_unique(): + idx = MultiIndex.from_tuples([(0, 0), (1, 1), (1, 1), (2, 2)]) + a = pd.Series(np.arange(4), index=idx) + new_idx = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)]) + + msg = "cannot handle a non-unique multi-index!" 
+ with pytest.raises(ValueError, match=msg): + a.reindex(new_idx) + + +@pytest.mark.parametrize("values", [[["a"], ["x"]], [[], []]]) +def test_reindex_empty_with_level(values): + # GH41170 + idx = MultiIndex.from_arrays(values) + result, result_indexer = idx.reindex(np.array(["b"]), level=0) + expected = MultiIndex(levels=[["b"], values[1]], codes=[[], []]) + expected_indexer = np.array([], dtype=result_indexer.dtype) + tm.assert_index_equal(result, expected) + tm.assert_numpy_array_equal(result_indexer, expected_indexer) + + +def test_reindex_not_all_tuples(): + keys = [("i", "i"), ("i", "j"), ("j", "i"), "j"] + mi = MultiIndex.from_tuples(keys[:-1]) + idx = Index(keys) + res, indexer = mi.reindex(idx) + + tm.assert_index_equal(res, idx) + expected = np.array([0, 1, 2, -1], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected) + + +def test_reindex_limit_arg_with_multiindex(): + # GH21247 + + idx = MultiIndex.from_tuples([(3, "A"), (4, "A"), (4, "B")]) + + df = pd.Series([0.02, 0.01, 0.012], index=idx) + + new_idx = MultiIndex.from_tuples( + [ + (3, "A"), + (3, "B"), + (4, "A"), + (4, "B"), + (4, "C"), + (5, "B"), + (5, "C"), + (6, "B"), + (6, "C"), + ] + ) + + with pytest.raises( + ValueError, + match="limit argument only valid if doing pad, backfill or nearest reindexing", + ): + df.reindex(new_idx, fill_value=0, limit=1) + + +def test_reindex_with_none_in_nested_multiindex(): + # GH42883 + index = MultiIndex.from_tuples([(("a", None), 1), (("b", None), 2)]) + index2 = MultiIndex.from_tuples([(("b", None), 2), (("a", None), 1)]) + df1_dtype = pd.DataFrame([1, 2], index=index) + df2_dtype = pd.DataFrame([2, 1], index=index2) + + result = df1_dtype.reindex_like(df2_dtype) + expected = df2_dtype + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_setops.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_setops.py new file mode 100644 index 0000000000000000000000000000000000000000..0abb56ecf9de7182dd38e45d5a8938cb81248034 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_setops.py @@ -0,0 +1,772 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalIndex, + DataFrame, + Index, + IntervalIndex, + MultiIndex, + Series, +) +import pandas._testing as tm +from pandas.api.types import ( + is_float_dtype, + is_unsigned_integer_dtype, +) + + +@pytest.mark.parametrize("case", [0.5, "xxx"]) +@pytest.mark.parametrize( + "method", ["intersection", "union", "difference", "symmetric_difference"] +) +def test_set_ops_error_cases(idx, case, sort, method): + # non-iterable input + msg = "Input must be Index or array-like" + with pytest.raises(TypeError, match=msg): + getattr(idx, method)(case, sort=sort) + + +@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list]) +def test_intersection_base(idx, sort, klass): + first = idx[2::-1] # first 3 elements reversed + second = idx[:5] + + if klass is not MultiIndex: + second = klass(second.values) + + intersect = first.intersection(second, sort=sort) + if sort is None: + expected = first.sort_values() + else: + expected = first + tm.assert_index_equal(intersect, expected) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.intersection([1, 2, 3], sort=sort) + + +@pytest.mark.arm_slow +@pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list]) +def test_union_base(idx, sort, klass): + 
first = idx[::-1] + second = idx[:5] + + if klass is not MultiIndex: + second = klass(second.values) + + union = first.union(second, sort=sort) + if sort is None: + expected = first.sort_values() + else: + expected = first + tm.assert_index_equal(union, expected) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.union([1, 2, 3], sort=sort) + + +def test_difference_base(idx, sort): + second = idx[4:] + answer = idx[:4] + result = idx.difference(second, sort=sort) + + if sort is None: + answer = answer.sort_values() + + assert result.equals(answer) + tm.assert_index_equal(result, answer) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = idx.difference(case, sort=sort) + tm.assert_index_equal(result, answer) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + idx.difference([1, 2, 3], sort=sort) + + +def test_symmetric_difference(idx, sort): + first = idx[1:] + second = idx[:-1] + answer = idx[[-1, 0]] + result = first.symmetric_difference(second, sort=sort) + + if sort is None: + answer = answer.sort_values() + + tm.assert_index_equal(result, answer) + + # GH 10149 + cases = [klass(second.values) for klass in [np.array, Series, list]] + for case in cases: + result = first.symmetric_difference(case, sort=sort) + tm.assert_index_equal(result, answer) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.symmetric_difference([1, 2, 3], sort=sort) + + +def test_multiindex_symmetric_difference(): + # GH 13490 + idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=["a", "b"]) + result = idx.symmetric_difference(idx) + assert result.names == idx.names + + idx2 = idx.copy().rename(["A", "B"]) + result = idx.symmetric_difference(idx2) + assert result.names == [None, None] + + +def test_empty(idx): + # GH 15270 + assert not idx.empty + assert idx[:0].empty + + +def test_difference(idx, sort): + first = idx + result = first.difference(idx[-3:], sort=sort) + vals = idx[:-3].values + + if sort is None: + vals = sorted(vals) + + expected = MultiIndex.from_tuples(vals, sortorder=0, names=idx.names) + + assert isinstance(result, MultiIndex) + assert result.equals(expected) + assert result.names == idx.names + tm.assert_index_equal(result, expected) + + # empty difference: reflexive + result = idx.difference(idx, sort=sort) + expected = idx[:0] + assert result.equals(expected) + assert result.names == idx.names + + # empty difference: superset + result = idx[-3:].difference(idx, sort=sort) + expected = idx[:0] + assert result.equals(expected) + assert result.names == idx.names + + # empty difference: degenerate + result = idx[:0].difference(idx, sort=sort) + expected = idx[:0] + assert result.equals(expected) + assert result.names == idx.names + + # names not the same + chunklet = idx[-3:] + chunklet.names = ["foo", "baz"] + result = first.difference(chunklet, sort=sort) + assert result.names == (None, None) + + # empty, but non-equal + result = idx.difference(idx.sortlevel(1)[0], sort=sort) + assert len(result) == 0 + + # raise Exception called with non-MultiIndex + result = first.difference(first.values, sort=sort) + assert result.equals(first[:0]) + + # name from empty array + result = first.difference([], sort=sort) + assert first.equals(result) + assert first.names == result.names + + # name from non-empty array + result = first.difference([("foo", "one")], 
sort=sort) + expected = MultiIndex.from_tuples( + [("bar", "one"), ("baz", "two"), ("foo", "two"), ("qux", "one"), ("qux", "two")] + ) + expected.names = first.names + assert first.names == result.names + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + first.difference([1, 2, 3, 4, 5], sort=sort) + + +def test_difference_sort_special(): + # GH-24959 + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + # sort=None, the default + result = idx.difference([]) + tm.assert_index_equal(result, idx) + + +def test_difference_sort_special_true(): + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + result = idx.difference([], sort=True) + expected = MultiIndex.from_product([[0, 1], ["a", "b"]]) + tm.assert_index_equal(result, expected) + + +def test_difference_sort_incomparable(): + # GH-24959 + idx = MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]]) + + other = MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]]) + # sort=None, the default + msg = "sort order is undefined for incomparable objects" + with tm.assert_produces_warning(RuntimeWarning, match=msg): + result = idx.difference(other) + tm.assert_index_equal(result, idx) + + # sort=False + result = idx.difference(other, sort=False) + tm.assert_index_equal(result, idx) + + +def test_difference_sort_incomparable_true(): + idx = MultiIndex.from_product([[1, pd.Timestamp("2000"), 2], ["a", "b"]]) + other = MultiIndex.from_product([[3, pd.Timestamp("2000"), 4], ["c", "d"]]) + + # TODO: this is raising in constructing a Categorical when calling + # algos.safe_sort. Should we catch and re-raise with a better message? + msg = "'values' is not ordered, please explicitly specify the categories order " + with pytest.raises(TypeError, match=msg): + idx.difference(other, sort=True) + + +def test_union(idx, sort): + piece1 = idx[:5][::-1] + piece2 = idx[3:] + + the_union = piece1.union(piece2, sort=sort) + + if sort in (None, False): + tm.assert_index_equal(the_union.sort_values(), idx.sort_values()) + else: + tm.assert_index_equal(the_union, idx) + + # corner case, pass self or empty thing: + the_union = idx.union(idx, sort=sort) + tm.assert_index_equal(the_union, idx) + + the_union = idx.union(idx[:0], sort=sort) + tm.assert_index_equal(the_union, idx) + + tuples = idx.values + result = idx[:4].union(tuples[4:], sort=sort) + if sort is None: + tm.assert_index_equal(result.sort_values(), idx.sort_values()) + else: + assert result.equals(idx) + + +def test_union_with_regular_index(idx, using_infer_string): + other = Index(["A", "B", "C"]) + + result = other.union(idx) + assert ("foo", "one") in result + assert "B" in result + + if using_infer_string: + with pytest.raises(NotImplementedError, match="Can only union"): + idx.union(other) + else: + msg = "The values in the array are unorderable" + with tm.assert_produces_warning(RuntimeWarning, match=msg): + result2 = idx.union(other) + # This is more consistent now, if sorting fails then we don't sort at all + # in the MultiIndex case. 
+ assert not result.equals(result2) + + +def test_intersection(idx, sort): + piece1 = idx[:5][::-1] + piece2 = idx[3:] + + the_int = piece1.intersection(piece2, sort=sort) + + if sort in (None, True): + tm.assert_index_equal(the_int, idx[3:5]) + else: + tm.assert_index_equal(the_int.sort_values(), idx[3:5]) + + # corner case, pass self + the_int = idx.intersection(idx, sort=sort) + tm.assert_index_equal(the_int, idx) + + # empty intersection: disjoint + empty = idx[:2].intersection(idx[2:], sort=sort) + expected = idx[:0] + assert empty.equals(expected) + + tuples = idx.values + result = idx.intersection(tuples) + assert result.equals(idx) + + +@pytest.mark.parametrize( + "method", ["intersection", "union", "difference", "symmetric_difference"] +) +def test_setop_with_categorical(idx, sort, method): + other = idx.to_flat_index().astype("category") + res_names = [None] * idx.nlevels + + result = getattr(idx, method)(other, sort=sort) + expected = getattr(idx, method)(idx, sort=sort).rename(res_names) + tm.assert_index_equal(result, expected) + + result = getattr(idx, method)(other[:5], sort=sort) + expected = getattr(idx, method)(idx[:5], sort=sort).rename(res_names) + tm.assert_index_equal(result, expected) + + +def test_intersection_non_object(idx, sort): + other = Index(range(3), name="foo") + + result = idx.intersection(other, sort=sort) + expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=None) + tm.assert_index_equal(result, expected, exact=True) + + # if we pass a length-0 ndarray (i.e. no name, we retain our idx.name) + result = idx.intersection(np.asarray(other)[:0], sort=sort) + expected = MultiIndex(levels=idx.levels, codes=[[]] * idx.nlevels, names=idx.names) + tm.assert_index_equal(result, expected, exact=True) + + msg = "other must be a MultiIndex or a list of tuples" + with pytest.raises(TypeError, match=msg): + # With non-zero length non-index, we try and fail to convert to tuples + idx.intersection(np.asarray(other), sort=sort) + + +def test_intersect_equal_sort(): + # GH-24959 + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + tm.assert_index_equal(idx.intersection(idx, sort=False), idx) + tm.assert_index_equal(idx.intersection(idx, sort=None), idx) + + +def test_intersect_equal_sort_true(): + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + expected = MultiIndex.from_product([[0, 1], ["a", "b"]]) + result = idx.intersection(idx, sort=True) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("slice_", [slice(None), slice(0)]) +def test_union_sort_other_empty(slice_): + # https://github.com/pandas-dev/pandas/issues/24959 + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + + # default, sort=None + other = idx[slice_] + tm.assert_index_equal(idx.union(other), idx) + tm.assert_index_equal(other.union(idx), idx) + + # sort=False + tm.assert_index_equal(idx.union(other, sort=False), idx) + + +def test_union_sort_other_empty_sort(): + idx = MultiIndex.from_product([[1, 0], ["a", "b"]]) + other = idx[:0] + result = idx.union(other, sort=True) + expected = MultiIndex.from_product([[0, 1], ["a", "b"]]) + tm.assert_index_equal(result, expected) + + +def test_union_sort_other_incomparable(): + # https://github.com/pandas-dev/pandas/issues/24959 + idx = MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]]) + + # default, sort=None + with tm.assert_produces_warning(RuntimeWarning): + result = idx.union(idx[:1]) + tm.assert_index_equal(result, idx) + + # sort=False + result = idx.union(idx[:1], sort=False) + 
tm.assert_index_equal(result, idx) + + +def test_union_sort_other_incomparable_sort(): + idx = MultiIndex.from_product([[1, pd.Timestamp("2000")], ["a", "b"]]) + msg = "'<' not supported between instances of 'Timestamp' and 'int'" + with pytest.raises(TypeError, match=msg): + idx.union(idx[:1], sort=True) + + +def test_union_non_object_dtype_raises(): + # GH#32646 raise NotImplementedError instead of less-informative error + mi = MultiIndex.from_product([["a", "b"], [1, 2]]) + + idx = mi.levels[1] + + msg = "Can only union MultiIndex with MultiIndex or Index of tuples" + with pytest.raises(NotImplementedError, match=msg): + mi.union(idx) + + +def test_union_empty_self_different_names(): + # GH#38423 + mi = MultiIndex.from_arrays([[]]) + mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"]) + result = mi.union(mi2) + expected = MultiIndex.from_arrays([[1, 2], [3, 4]]) + tm.assert_index_equal(result, expected) + + +def test_union_multiindex_empty_rangeindex(): + # GH#41234 + mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"]) + ri = pd.RangeIndex(0) + + result_left = mi.union(ri) + tm.assert_index_equal(mi, result_left, check_names=False) + + result_right = ri.union(mi) + tm.assert_index_equal(mi, result_right, check_names=False) + + +@pytest.mark.parametrize( + "method", ["union", "intersection", "difference", "symmetric_difference"] +) +def test_setops_sort_validation(method): + idx1 = MultiIndex.from_product([["a", "b"], [1, 2]]) + idx2 = MultiIndex.from_product([["b", "c"], [1, 2]]) + + with pytest.raises(ValueError, match="The 'sort' keyword only takes"): + getattr(idx1, method)(idx2, sort=2) + + # sort=True is supported as of GH#? + getattr(idx1, method)(idx2, sort=True) + + +@pytest.mark.parametrize("val", [pd.NA, 100]) +def test_difference_keep_ea_dtypes(any_numeric_ea_dtype, val): + # GH#48606 + midx = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [2, 1]], names=["a", None] + ) + midx2 = MultiIndex.from_arrays( + [Series([1, 2, val], dtype=any_numeric_ea_dtype), [1, 1, 3]] + ) + result = midx.difference(midx2) + expected = MultiIndex.from_arrays([Series([1], dtype=any_numeric_ea_dtype), [2]]) + tm.assert_index_equal(result, expected) + + result = midx.difference(midx.sort_values(ascending=False)) + expected = MultiIndex.from_arrays( + [Series([], dtype=any_numeric_ea_dtype), Series([], dtype=np.int64)], + names=["a", None], + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("val", [pd.NA, 5]) +def test_symmetric_difference_keeping_ea_dtype(any_numeric_ea_dtype, val): + # GH#48607 + midx = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [2, 1]], names=["a", None] + ) + midx2 = MultiIndex.from_arrays( + [Series([1, 2, val], dtype=any_numeric_ea_dtype), [1, 1, 3]] + ) + result = midx.symmetric_difference(midx2) + expected = MultiIndex.from_arrays( + [Series([1, 1, val], dtype=any_numeric_ea_dtype), [1, 2, 3]] + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + ("tuples", "exp_tuples"), + [ + ([("val1", "test1")], [("val1", "test1")]), + ([("val1", "test1"), ("val1", "test1")], [("val1", "test1")]), + ( + [("val2", "test2"), ("val1", "test1")], + [("val2", "test2"), ("val1", "test1")], + ), + ], +) +def test_intersect_with_duplicates(tuples, exp_tuples): + # GH#36915 + left = MultiIndex.from_tuples(tuples, names=["first", "second"]) + right = MultiIndex.from_tuples( + [("val1", "test1"), ("val1", "test1"), ("val2", "test2")], + names=["first", "second"], + ) + 
result = left.intersection(right) + expected = MultiIndex.from_tuples(exp_tuples, names=["first", "second"]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "data, names, expected", + [ + ((1,), None, [None, None]), + ((1,), ["a"], [None, None]), + ((1,), ["b"], [None, None]), + ((1, 2), ["c", "d"], [None, None]), + ((1, 2), ["b", "a"], [None, None]), + ((1, 2, 3), ["a", "b", "c"], [None, None]), + ((1, 2), ["a", "c"], ["a", None]), + ((1, 2), ["c", "b"], [None, "b"]), + ((1, 2), ["a", "b"], ["a", "b"]), + ((1, 2), [None, "b"], [None, "b"]), + ], +) +def test_maybe_match_names(data, names, expected): + # GH#38323 + mi = MultiIndex.from_tuples([], names=["a", "b"]) + mi2 = MultiIndex.from_tuples([data], names=names) + result = mi._maybe_match_names(mi2) + assert result == expected + + +def test_intersection_equal_different_names(): + # GH#30302 + mi1 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["c", "b"]) + mi2 = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["a", "b"]) + + result = mi1.intersection(mi2) + expected = MultiIndex.from_arrays([[1, 2], [3, 4]], names=[None, "b"]) + tm.assert_index_equal(result, expected) + + +def test_intersection_different_names(): + # GH#38323 + mi = MultiIndex.from_arrays([[1], [3]], names=["c", "b"]) + mi2 = MultiIndex.from_arrays([[1], [3]]) + result = mi.intersection(mi2) + tm.assert_index_equal(result, mi2) + + +def test_intersection_with_missing_values_on_both_sides(nulls_fixture): + # GH#38623 + mi1 = MultiIndex.from_arrays([[3, nulls_fixture, 4, nulls_fixture], [1, 2, 4, 2]]) + mi2 = MultiIndex.from_arrays([[3, nulls_fixture, 3], [1, 2, 4]]) + result = mi1.intersection(mi2) + expected = MultiIndex.from_arrays([[3, nulls_fixture], [1, 2]]) + tm.assert_index_equal(result, expected) + + +def test_union_with_missing_values_on_both_sides(nulls_fixture): + # GH#38623 + mi1 = MultiIndex.from_arrays([[1, nulls_fixture]]) + mi2 = MultiIndex.from_arrays([[1, nulls_fixture, 3]]) + result = mi1.union(mi2) + expected = MultiIndex.from_arrays([[1, 3, nulls_fixture]]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["float64", "Float64"]) +@pytest.mark.parametrize("sort", [None, False]) +def test_union_nan_got_duplicated(dtype, sort): + # GH#38977, GH#49010 + mi1 = MultiIndex.from_arrays([pd.array([1.0, np.nan], dtype=dtype), [2, 3]]) + mi2 = MultiIndex.from_arrays([pd.array([1.0, np.nan, 3.0], dtype=dtype), [2, 3, 4]]) + result = mi1.union(mi2, sort=sort) + if sort is None: + expected = MultiIndex.from_arrays( + [pd.array([1.0, 3.0, np.nan], dtype=dtype), [2, 4, 3]] + ) + else: + expected = mi2 + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("val", [4, 1]) +def test_union_keep_ea_dtype(any_numeric_ea_dtype, val): + # GH#48505 + + arr1 = Series([val, 2], dtype=any_numeric_ea_dtype) + arr2 = Series([2, 1], dtype=any_numeric_ea_dtype) + midx = MultiIndex.from_arrays([arr1, [1, 2]], names=["a", None]) + midx2 = MultiIndex.from_arrays([arr2, [2, 1]]) + result = midx.union(midx2) + if val == 4: + expected = MultiIndex.from_arrays( + [Series([1, 2, 4], dtype=any_numeric_ea_dtype), [1, 2, 1]] + ) + else: + expected = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [1, 2]] + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dupe_val", [3, pd.NA]) +def test_union_with_duplicates_keep_ea_dtype(dupe_val, any_numeric_ea_dtype): + # GH48900 + mi1 = MultiIndex.from_arrays( + [ + Series([1, dupe_val, 2], dtype=any_numeric_ea_dtype), + Series([1, 
dupe_val, 2], dtype=any_numeric_ea_dtype), + ] + ) + mi2 = MultiIndex.from_arrays( + [ + Series([2, dupe_val, dupe_val], dtype=any_numeric_ea_dtype), + Series([2, dupe_val, dupe_val], dtype=any_numeric_ea_dtype), + ] + ) + result = mi1.union(mi2) + expected = MultiIndex.from_arrays( + [ + Series([1, 2, dupe_val, dupe_val], dtype=any_numeric_ea_dtype), + Series([1, 2, dupe_val, dupe_val], dtype=any_numeric_ea_dtype), + ] + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_union_duplicates(index, request): + # GH#38977 + if index.empty or isinstance(index, (IntervalIndex, CategoricalIndex)): + pytest.skip(f"No duplicates in an empty {type(index).__name__}") + + values = index.unique().values.tolist() + mi1 = MultiIndex.from_arrays([values, [1] * len(values)]) + mi2 = MultiIndex.from_arrays([[values[0]] + values, [1] * (len(values) + 1)]) + result = mi2.union(mi1) + expected = mi2.sort_values() + tm.assert_index_equal(result, expected) + + if ( + is_unsigned_integer_dtype(mi2.levels[0]) + and (mi2.get_level_values(0) < 2**63).all() + ): + # GH#47294 - union uses lib.fast_zip, converting data to Python integers + # and loses type information. Result is then unsigned only when values are + # sufficiently large to require unsigned dtype. This happens only if other + # has dups or one of the two has missing values + expected = expected.set_levels( + [expected.levels[0].astype(np.int64), expected.levels[1]] + ) + elif is_float_dtype(mi2.levels[0]): + # mi2 has duplicates, which is a different path than above. Fix that path + # to use the correct float dtype? + expected = expected.set_levels( + [expected.levels[0].astype(float), expected.levels[1]] + ) + + result = mi1.union(mi2) + tm.assert_index_equal(result, expected) + + +def test_union_keep_dtype_precision(any_real_numeric_dtype): + # GH#48498 + arr1 = Series([4, 1, 1], dtype=any_real_numeric_dtype) + arr2 = Series([1, 4], dtype=any_real_numeric_dtype) + midx = MultiIndex.from_arrays([arr1, [2, 1, 1]], names=["a", None]) + midx2 = MultiIndex.from_arrays([arr2, [1, 2]], names=["a", None]) + + result = midx.union(midx2) + expected = MultiIndex.from_arrays( + ([Series([1, 1, 4], dtype=any_real_numeric_dtype), [1, 1, 2]]), + names=["a", None], + ) + tm.assert_index_equal(result, expected) + + +def test_union_keep_ea_dtype_with_na(any_numeric_ea_dtype): + # GH#48498 + arr1 = Series([4, pd.NA], dtype=any_numeric_ea_dtype) + arr2 = Series([1, pd.NA], dtype=any_numeric_ea_dtype) + midx = MultiIndex.from_arrays([arr1, [2, 1]], names=["a", None]) + midx2 = MultiIndex.from_arrays([arr2, [1, 2]]) + result = midx.union(midx2) + expected = MultiIndex.from_arrays( + [Series([1, 4, pd.NA, pd.NA], dtype=any_numeric_ea_dtype), [1, 2, 1, 2]] + ) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "levels1, levels2, codes1, codes2, names", + [ + ( + [["a", "b", "c"], [0, ""]], + [["c", "d", "b"], [""]], + [[0, 1, 2], [1, 1, 1]], + [[0, 1, 2], [0, 0, 0]], + ["name1", "name2"], + ), + ], +) +def test_intersection_lexsort_depth(levels1, levels2, codes1, codes2, names): + # GH#25169 + mi1 = MultiIndex(levels=levels1, codes=codes1, names=names) + mi2 = MultiIndex(levels=levels2, codes=codes2, names=names) + mi_int = mi1.intersection(mi2) + assert mi_int._lexsort_depth == 2 + + +@pytest.mark.parametrize( + "a", + [pd.Categorical(["a", "b"], categories=["a", "b"]), ["a", "b"]], +) +@pytest.mark.parametrize( + "b", + [ + pd.Categorical(["a", "b"], categories=["b", 
"a"], ordered=True), + pd.Categorical(["a", "b"], categories=["b", "a"]), + ], +) +def test_intersection_with_non_lex_sorted_categories(a, b): + # GH#49974 + other = ["1", "2"] + + df1 = DataFrame({"x": a, "y": other}) + df2 = DataFrame({"x": b, "y": other}) + + expected = MultiIndex.from_arrays([a, other], names=["x", "y"]) + + res1 = MultiIndex.from_frame(df1).intersection( + MultiIndex.from_frame(df2.sort_values(["x", "y"])) + ) + res2 = MultiIndex.from_frame(df1).intersection(MultiIndex.from_frame(df2)) + res3 = MultiIndex.from_frame(df1.sort_values(["x", "y"])).intersection( + MultiIndex.from_frame(df2) + ) + res4 = MultiIndex.from_frame(df1.sort_values(["x", "y"])).intersection( + MultiIndex.from_frame(df2.sort_values(["x", "y"])) + ) + + tm.assert_index_equal(res1, expected) + tm.assert_index_equal(res2, expected) + tm.assert_index_equal(res3, expected) + tm.assert_index_equal(res4, expected) + + +@pytest.mark.parametrize("val", [pd.NA, 100]) +def test_intersection_keep_ea_dtypes(val, any_numeric_ea_dtype): + # GH#48604 + midx = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [2, 1]], names=["a", None] + ) + midx2 = MultiIndex.from_arrays( + [Series([1, 2, val], dtype=any_numeric_ea_dtype), [1, 1, 3]] + ) + result = midx.intersection(midx2) + expected = MultiIndex.from_arrays([Series([2], dtype=any_numeric_ea_dtype), [1]]) + tm.assert_index_equal(result, expected) + + +def test_union_with_na_when_constructing_dataframe(): + # GH43222 + series1 = Series( + (1,), + index=MultiIndex.from_arrays( + [Series([None], dtype="string"), Series([None], dtype="string")] + ), + ) + series2 = Series((10, 20), index=MultiIndex.from_tuples(((None, None), ("a", "b")))) + result = DataFrame([series1, series2]) + expected = DataFrame({(np.nan, np.nan): [1.0, 10.0], ("a", "b"): [np.nan, 20.0]}) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_take.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_take.py new file mode 100644 index 0000000000000000000000000000000000000000..543cba25c373b71b8c79c7fe0ea5ae2fb7f40b18 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_take.py @@ -0,0 +1,78 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_take(idx): + indexer = [4, 3, 0, 2] + result = idx.take(indexer) + expected = idx[indexer] + assert result.equals(expected) + + # GH 10791 + msg = "'MultiIndex' object has no attribute 'freq'" + with pytest.raises(AttributeError, match=msg): + idx.freq + + +def test_take_invalid_kwargs(idx): + indices = [1, 2] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + idx.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, mode="clip") + + +def test_take_fill_value(): + # GH 12631 + vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]] + idx = pd.MultiIndex.from_product(vals, names=["str", "dt"]) + + result = idx.take(np.array([1, 0, -1])) + exp_vals = [ + ("A", pd.Timestamp("2011-01-02")), + ("A", pd.Timestamp("2011-01-01")), + ("B", pd.Timestamp("2011-01-02")), + ] + expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"]) + tm.assert_index_equal(result, 
expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + exp_vals = [ + ("A", pd.Timestamp("2011-01-02")), + ("A", pd.Timestamp("2011-01-01")), + (np.nan, pd.NaT), + ] + expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"]) + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + exp_vals = [ + ("A", pd.Timestamp("2011-01-02")), + ("A", pd.Timestamp("2011-01-01")), + ("B", pd.Timestamp("2011-01-02")), + ] + expected = pd.MultiIndex.from_tuples(exp_vals, names=["str", "dt"]) + tm.assert_index_equal(result, expected) + + msg = "When allow_fill=True and fill_value is not None, all indices must be >= -1" + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for( axis 0 with)? size 4" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5]))
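
The hunks above vendor the pandas MultiIndex test modules verbatim. As a quick orientation for readers skimming the diff, the standalone sketch below (not part of any patched file) strings together a few of the behaviors those tests pin down: lexsort depth, partial-string indexing on a datetime level, and take with fill_value. It assumes a recent pandas in which the private _is_lexsorted/_lexsort_depth helpers used by the tests are available; the variable names and the output noted in comments are illustrative only.

import numpy as np
import pandas as pd
from pandas import DataFrame, IndexSlice, MultiIndex, date_range

# Lexsort depth: codes sorted on both levels report full depth, while
# unsorted codes report depth 0 (mirrors TestIsLexsorted / TestLexsortDepth).
sorted_mi = MultiIndex(
    levels=[[0, 1], [0, 1, 2]],
    codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
unsorted_mi = MultiIndex(
    levels=[[0, 1], [0, 1, 2]],
    codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]],
)
print(sorted_mi._is_lexsorted(), unsorted_mi._lexsort_depth)  # True 0

# Partial-string indexing on the datetime level (mirrors test_partial_indexing.py).
dr = date_range("2016-01-01", "2016-01-03", freq="12h")
df = DataFrame(
    {"c1": range(len(dr) * 3)}, index=MultiIndex.from_product([dr, list("abc")])
)
print(df.loc["2016-01-01"])                                 # both the 00:00 and 12:00 rows
print(df.loc[IndexSlice["2016-01-01":"2016-02-01", :], :])  # slice on the first level
print(df.loc[("2016-01-01", "a"), :])                       # tuple key with a partial string

# take with fill_value: -1 positions become missing values (mirrors test_take.py).
idx = MultiIndex.from_product(
    [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]],
    names=["str", "dt"],
)
print(idx.take(np.array([1, 0, -1]), fill_value=True))      # last entry is (nan, NaT)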