diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc9a29af844739f89ddac58c61752497f7c9e1c5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/conftest.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/conftest.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37c876f99c3677f9037d69aa09f607e75f93b4c6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/conftest.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_any_index.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_any_index.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c66c117146931be62cc3a26069ca1c7e93a3d331
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_any_index.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..225c3412cf06f6f0dbbe2fb38aa68d3e54a8ca4a
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_base.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..419bcc0f49fb0f7fbfd0c6bbd28e97878fd0f24e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_common.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_datetimelike.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_datetimelike.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d70d7cb5bf0d13f53bd2e3d4f0d152628420108
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_datetimelike.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_engines.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_engines.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a442fa8b852935798033da09db4cf2be5f2fb93
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_engines.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_frozen.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_frozen.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c78a197d44c4b5379a66beada181501381b57c2e
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_frozen.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_index_new.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_index_new.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d087d3c442c73ff193a04b81e1ab148af99b64c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_index_new.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_indexing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_indexing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f9e00b3fc41a5ff00bcac00f368208acfdce647
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_indexing.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_numpy_compat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_numpy_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d247bba4c5529bda330837dd2217ddfaab81ee70
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_numpy_compat.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_old_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_old_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be8b4b93e59949ac89298333af62135f7225cc93
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_old_base.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_setops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_setops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19dc18d31a9d2dbcb5bda04b07b4b4083cc599a8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_setops.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_subclass.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_subclass.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5061b49f0cc80353529b1c3091e52f46617055da
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_subclass.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_append.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_append.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56178e4ae7d286081155e917d4520511e604add5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_append.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_astype.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_astype.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8202cfa6ee3048170171b368b1b325dffe280251
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_astype.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_constructors.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_constructors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c96868eb0fee091f23e11752141ecd0098878aa3
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_constructors.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_equals.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_equals.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5cfbf39410d12b62812d1a2cf612ac3059edf022
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_equals.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_fillna.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_fillna.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5eefc57640f42284533f437bd0a62ede44b41f5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_fillna.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_map.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_map.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..92df3694b65fc6425896cd4d589e3f058742439d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_map.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_setops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_setops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f411dcfdec90772d9a4099c4303122a749acd060
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_setops.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_astype.py
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..a17627b7515b26b1fcfdca0feec376f03a018e83 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_astype.py @@ -0,0 +1,90 @@ +from datetime import date + +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, + IntervalIndex, +) +import pandas._testing as tm + + +class TestAstype: + def test_astype(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + result = ci.astype(object) + tm.assert_index_equal(result, Index(np.array(ci), dtype=object)) + + # this IS equal, but not the same class + assert result.equals(ci) + assert isinstance(result, Index) + assert not isinstance(result, CategoricalIndex) + + # interval + ii = IntervalIndex.from_arrays(left=[-0.001, 2.0], right=[2, 4], closed="right") + + ci = CategoricalIndex( + Categorical.from_codes([0, 1, -1], categories=ii, ordered=True) + ) + + result = ci.astype("interval") + expected = ii.take([0, 1, -1], allow_fill=True, fill_value=np.nan) + tm.assert_index_equal(result, expected) + + result = IntervalIndex(result.values) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("name", [None, "foo"]) + @pytest.mark.parametrize("dtype_ordered", [True, False]) + @pytest.mark.parametrize("index_ordered", [True, False]) + def test_astype_category(self, name, dtype_ordered, index_ordered): + # GH#18630 + index = CategoricalIndex( + list("aabbca"), categories=list("cab"), ordered=index_ordered + ) + if name: + index = index.rename(name) + + # standard categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = index.astype(dtype) + expected = CategoricalIndex( + index.tolist(), + name=name, + categories=index.categories, + ordered=dtype_ordered, + ) + tm.assert_index_equal(result, expected) + + # non-standard categories + dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered) + result = index.astype(dtype) + expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype) + tm.assert_index_equal(result, expected) + + if dtype_ordered is False: + # dtype='category' can't specify ordered, so only test once + result = index.astype("category") + expected = index + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("box", [True, False]) + def test_categorical_date_roundtrip(self, box): + # astype to categorical and back should preserve date objects + v = date.today() + + obj = Index([v, v]) + assert obj.dtype == object + if box: + obj = obj.array + + cat = obj.astype("category") + + rtrip = cat.astype(object) + assert rtrip.dtype == object + assert type(rtrip[0]) is date diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_category.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_category.py new file mode 100644 index 0000000000000000000000000000000000000000..03a298a13dc2b45b3e78ec2d6390741709e42590 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_category.py @@ -0,0 +1,394 @@ +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import index as libindex +from pandas._libs.arrays import NDArrayBacked + +import pandas as pd +from pandas import ( + Categorical, + CategoricalDtype, +) +import pandas._testing as 
tm +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, +) + + +class TestCategoricalIndex: + @pytest.fixture + def simple_index(self) -> CategoricalIndex: + return CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + def test_can_hold_identifiers(self): + idx = CategoricalIndex(list("aabbca"), categories=None, ordered=False) + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is True + + def test_insert(self, simple_index): + ci = simple_index + categories = ci.categories + + # test 0th element + result = ci.insert(0, "a") + expected = CategoricalIndex(list("aaabbca"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # test Nth element that follows Python list behavior + result = ci.insert(-1, "a") + expected = CategoricalIndex(list("aabbcaa"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # test empty + result = CategoricalIndex([], categories=categories).insert(0, "a") + expected = CategoricalIndex(["a"], categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # invalid -> cast to object + expected = ci.astype(object).insert(0, "d") + result = ci.insert(0, "d").astype(object) + tm.assert_index_equal(result, expected, exact=True) + + # GH 18295 (test missing) + expected = CategoricalIndex(["a", np.nan, "a", "b", "c", "b"]) + for na in (np.nan, pd.NaT, None): + result = CategoricalIndex(list("aabcb")).insert(1, na) + tm.assert_index_equal(result, expected) + + def test_insert_na_mismatched_dtype(self): + ci = CategoricalIndex([0, 1, 1]) + result = ci.insert(0, pd.NaT) + expected = Index([pd.NaT, 0, 1, 1], dtype=object) + tm.assert_index_equal(result, expected) + + def test_delete(self, simple_index): + ci = simple_index + categories = ci.categories + + result = ci.delete(0) + expected = CategoricalIndex(list("abbca"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + result = ci.delete(-1) + expected = CategoricalIndex(list("aabbc"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + with tm.external_error_raised((IndexError, ValueError)): + # Either depending on NumPy version + ci.delete(10) + + @pytest.mark.parametrize( + "data, non_lexsorted_data", + [[[1, 2, 3], [9, 0, 1, 2, 3]], [list("abc"), list("fabcd")]], + ) + def test_is_monotonic(self, data, non_lexsorted_data): + c = CategoricalIndex(data) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(data, ordered=True) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(data, categories=reversed(data)) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is True + + c = CategoricalIndex(data, categories=reversed(data), ordered=True) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is True + + # test when data is neither monotonic increasing nor decreasing + reordered_data = [data[0], data[2], data[1]] + c = CategoricalIndex(reordered_data, categories=reversed(data)) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is False + + # non lexsorted categories + categories = non_lexsorted_data + + c = CategoricalIndex(categories[:2], categories=categories) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(categories[1:3], categories=categories) + assert 
c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + def test_has_duplicates(self): + idx = CategoricalIndex([0, 0, 0], name="foo") + assert idx.is_unique is False + assert idx.has_duplicates is True + + idx = CategoricalIndex([0, 1], categories=[2, 3], name="foo") + assert idx.is_unique is False + assert idx.has_duplicates is True + + idx = CategoricalIndex([0, 1, 2, 3], categories=[1, 2, 3], name="foo") + assert idx.is_unique is True + assert idx.has_duplicates is False + + @pytest.mark.parametrize( + "data, categories, expected", + [ + ( + [1, 1, 1], + [1, 2, 3], + { + "first": np.array([False, True, True]), + "last": np.array([True, True, False]), + False: np.array([True, True, True]), + }, + ), + ( + [1, 1, 1], + list("abc"), + { + "first": np.array([False, True, True]), + "last": np.array([True, True, False]), + False: np.array([True, True, True]), + }, + ), + ( + [2, "a", "b"], + list("abc"), + { + "first": np.zeros(shape=(3), dtype=np.bool_), + "last": np.zeros(shape=(3), dtype=np.bool_), + False: np.zeros(shape=(3), dtype=np.bool_), + }, + ), + ( + list("abb"), + list("abc"), + { + "first": np.array([False, False, True]), + "last": np.array([False, True, False]), + False: np.array([False, True, True]), + }, + ), + ], + ) + def test_drop_duplicates(self, data, categories, expected): + idx = CategoricalIndex(data, categories=categories, name="foo") + for keep, e in expected.items(): + tm.assert_numpy_array_equal(idx.duplicated(keep=keep), e) + e = idx[~e] + result = idx.drop_duplicates(keep=keep) + tm.assert_index_equal(result, e) + + @pytest.mark.parametrize( + "data, categories, expected_data", + [ + ([1, 1, 1], [1, 2, 3], [1]), + ([1, 1, 1], list("abc"), [np.nan]), + ([1, 2, "a"], [1, 2, 3], [1, 2, np.nan]), + ([2, "a", "b"], list("abc"), [np.nan, "a", "b"]), + ], + ) + def test_unique(self, data, categories, expected_data, ordered): + dtype = CategoricalDtype(categories, ordered=ordered) + + idx = CategoricalIndex(data, dtype=dtype) + expected = CategoricalIndex(expected_data, dtype=dtype) + tm.assert_index_equal(idx.unique(), expected) + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="repr doesn't roundtrip") + def test_repr_roundtrip(self): + ci = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + str(ci) + tm.assert_index_equal(eval(repr(ci)), ci, exact=True) + + # formatting + str(ci) + + # long format + # this is not reprable + ci = CategoricalIndex(np.random.default_rng(2).integers(0, 5, size=100)) + str(ci) + + def test_isin(self): + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + tm.assert_numpy_array_equal( + ci.isin(["c"]), np.array([False, False, False, True, False, False]) + ) + tm.assert_numpy_array_equal( + ci.isin(["c", "a", "b"]), np.array([True] * 5 + [False]) + ) + tm.assert_numpy_array_equal( + ci.isin(["c", "a", "b", np.nan]), np.array([True] * 6) + ) + + # mismatched categorical -> coerced to ndarray so doesn't matter + result = ci.isin(ci.set_categories(list("abcdefghi"))) + expected = np.array([True] * 6) + tm.assert_numpy_array_equal(result, expected) + + result = ci.isin(ci.set_categories(list("defghi"))) + expected = np.array([False] * 5 + [True]) + tm.assert_numpy_array_equal(result, expected) + + def test_isin_overlapping_intervals(self): + # GH 34974 + idx = pd.IntervalIndex([pd.Interval(0, 2), pd.Interval(0, 1)]) + result = CategoricalIndex(idx).isin(idx) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + def 
test_identical(self): + ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) + assert ci1.identical(ci1) + assert ci1.identical(ci1.copy()) + assert not ci1.identical(ci2) + + def test_ensure_copied_data(self): + # gh-12309: Check the "copy" argument of each + # Index.__new__ is honored. + # + # Must be tested separately from other indexes because + # self.values is not an ndarray. + index = CategoricalIndex(list("ab") * 5) + + result = CategoricalIndex(index.values, copy=True) + tm.assert_index_equal(index, result) + assert not np.shares_memory(result._data._codes, index._data._codes) + + result = CategoricalIndex(index.values, copy=False) + assert result._data._codes is index._data._codes + + +class TestCategoricalIndex2: + def test_view_i8(self): + # GH#25464 + ci = CategoricalIndex(list("ab") * 50) + msg = "When changing to a larger dtype, its size must be a divisor" + with pytest.raises(ValueError, match=msg): + ci.view("i8") + with pytest.raises(ValueError, match=msg): + ci._data.view("i8") + + ci = ci[:-4] # length divisible by 8 + + res = ci.view("i8") + expected = ci._data.codes.view("i8") + tm.assert_numpy_array_equal(res, expected) + + cat = ci._data + tm.assert_numpy_array_equal(cat.view("i8"), expected) + + @pytest.mark.parametrize( + "dtype, engine_type", + [ + (np.int8, libindex.Int8Engine), + (np.int16, libindex.Int16Engine), + (np.int32, libindex.Int32Engine), + (np.int64, libindex.Int64Engine), + ], + ) + def test_engine_type(self, dtype, engine_type): + if dtype != np.int64: + # num. of uniques required to push CategoricalIndex.codes to a + # dtype (128 categories required for .codes dtype to be int16 etc.) + num_uniques = {np.int8: 1, np.int16: 128, np.int32: 32768}[dtype] + ci = CategoricalIndex(range(num_uniques)) + else: + # having 2**32 - 2**31 categories would be very memory-intensive, + # so we cheat a bit with the dtype + ci = CategoricalIndex(range(32768)) # == 2**16 - 2**(16 - 1) + arr = ci.values._ndarray.astype("int64") + NDArrayBacked.__init__(ci._data, arr, ci.dtype) + assert np.issubdtype(ci.codes.dtype, dtype) + assert isinstance(ci._engine, engine_type) + + @pytest.mark.parametrize( + "func,op_name", + [ + (lambda idx: idx - idx, "__sub__"), + (lambda idx: idx + idx, "__add__"), + (lambda idx: idx - ["a", "b"], "__sub__"), + (lambda idx: idx + ["a", "b"], "__add__"), + (lambda idx: ["a", "b"] - idx, "__rsub__"), + (lambda idx: ["a", "b"] + idx, "__radd__"), + ], + ) + def test_disallow_addsub_ops(self, func, op_name): + # GH 10039 + # set ops (+/-) raise TypeError + idx = Index(Categorical(["a", "b"])) + cat_or_list = "'(Categorical|list)' and '(Categorical|list)'" + msg = "|".join( + [ + f"cannot perform {op_name} with this index type: CategoricalIndex", + "can only concatenate list", + rf"unsupported operand type\(s\) for [\+-]: {cat_or_list}", + ] + ) + with pytest.raises(TypeError, match=msg): + func(idx) + + def test_method_delegation(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.set_categories(list("cab")) + tm.assert_index_equal( + result, CategoricalIndex(list("aabbca"), categories=list("cab")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.rename_categories(list("efg")) + tm.assert_index_equal( + result, CategoricalIndex(list("ffggef"), categories=list("efg")) + ) + + # GH18862 (let rename_categories take callables) + result = ci.rename_categories(lambda x: x.upper()) + 
tm.assert_index_equal( + result, CategoricalIndex(list("AABBCA"), categories=list("CAB")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.add_categories(["d"]) + tm.assert_index_equal( + result, CategoricalIndex(list("aabbca"), categories=list("cabd")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.remove_categories(["c"]) + tm.assert_index_equal( + result, + CategoricalIndex(list("aabb") + [np.nan] + ["a"], categories=list("ab")), + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.as_unordered() + tm.assert_index_equal(result, ci) + + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.as_ordered() + tm.assert_index_equal( + result, + CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=True), + ) + + # invalid + msg = "cannot use inplace with CategoricalIndex" + with pytest.raises(ValueError, match=msg): + ci.set_categories(list("cab"), inplace=True) + + def test_remove_maintains_order(self): + ci = CategoricalIndex(list("abcdda"), categories=list("abcd")) + result = ci.reorder_categories(["d", "c", "b", "a"], ordered=True) + tm.assert_index_equal( + result, + CategoricalIndex(list("abcdda"), categories=list("dcba"), ordered=True), + ) + result = result.remove_categories(["c"]) + tm.assert_index_equal( + result, + CategoricalIndex( + ["a", "b", np.nan, "d", "d", "a"], categories=list("dba"), ordered=True + ), + ) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_constructors.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..f0c5307fc5c641ff25d26bd2bd8a158b43dd6a6d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_constructors.py @@ -0,0 +1,142 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +class TestCategoricalIndexConstructors: + def test_construction_disallows_scalar(self): + msg = "must be called with a collection of some kind" + with pytest.raises(TypeError, match=msg): + CategoricalIndex(data=1, categories=list("abcd"), ordered=False) + with pytest.raises(TypeError, match=msg): + CategoricalIndex(categories=list("abcd"), ordered=False) + + def test_construction(self): + ci = CategoricalIndex(list("aabbca"), categories=list("abcd"), ordered=False) + categories = ci.categories + + result = Index(ci) + tm.assert_index_equal(result, ci, exact=True) + assert not result.ordered + + result = Index(ci.values) + tm.assert_index_equal(result, ci, exact=True) + assert not result.ordered + + # empty + result = CategoricalIndex([], categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal(result.codes, np.array([], dtype="int8")) + assert not result.ordered + + # passing categories + result = CategoricalIndex(list("aabbca"), categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + + c = Categorical(list("aabbca")) + result = CategoricalIndex(c) + tm.assert_index_equal(result.categories, Index(list("abc"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + result = 
CategoricalIndex(c, categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + ci = CategoricalIndex(c, categories=list("abcd")) + result = CategoricalIndex(ci) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(ci, categories=list("ab")) + tm.assert_index_equal(result.categories, Index(list("ab"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(ci, categories=list("ab"), ordered=True) + tm.assert_index_equal(result.categories, Index(list("ab"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8") + ) + assert result.ordered + + result = CategoricalIndex(ci, categories=list("ab"), ordered=True) + expected = CategoricalIndex( + ci, categories=list("ab"), ordered=True, dtype="category" + ) + tm.assert_index_equal(result, expected, exact=True) + + # turn me to an Index + result = Index(np.array(ci)) + assert isinstance(result, Index) + assert not isinstance(result, CategoricalIndex) + + def test_construction_with_dtype(self): + # specify dtype + ci = CategoricalIndex(list("aabbca"), categories=list("abc"), ordered=False) + + result = Index(np.array(ci), dtype="category") + tm.assert_index_equal(result, ci, exact=True) + + result = Index(np.array(ci).tolist(), dtype="category") + tm.assert_index_equal(result, ci, exact=True) + + # these are generally only equal when the categories are reordered + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + result = Index(np.array(ci), dtype="category").reorder_categories(ci.categories) + tm.assert_index_equal(result, ci, exact=True) + + # make sure indexes are handled + idx = Index(range(3)) + expected = CategoricalIndex([0, 1, 2], categories=idx, ordered=True) + result = CategoricalIndex(idx, categories=idx, ordered=True) + tm.assert_index_equal(result, expected, exact=True) + + def test_construction_empty_with_bool_categories(self): + # see GH#22702 + cat = CategoricalIndex([], categories=[True, False]) + categories = sorted(cat.categories.tolist()) + assert categories == [False, True] + + def test_construction_with_categorical_dtype(self): + # construction with CategoricalDtype + # GH#18109 + data, cats, ordered = "a a b b".split(), "c b a".split(), True + dtype = CategoricalDtype(categories=cats, ordered=ordered) + + result = CategoricalIndex(data, dtype=dtype) + expected = CategoricalIndex(data, categories=cats, ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # GH#19032 + result = Index(data, dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + # error when combining categories/ordered and dtype kwargs + msg = "Cannot specify `categories` or `ordered` together with `dtype`." 
+ with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, categories=cats, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, ordered=ordered, dtype=dtype) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_equals.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_equals.py new file mode 100644 index 0000000000000000000000000000000000000000..a8353f301a3c39a50b2a0c5541722551ff660e30 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_equals.py @@ -0,0 +1,96 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + MultiIndex, +) + + +class TestEquals: + def test_equals_categorical(self): + ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) + + assert ci1.equals(ci1) + assert not ci1.equals(ci2) + assert ci1.equals(ci1.astype(object)) + assert ci1.astype(object).equals(ci1) + + assert (ci1 == ci1).all() + assert not (ci1 != ci1).all() + assert not (ci1 > ci1).all() + assert not (ci1 < ci1).all() + assert (ci1 <= ci1).all() + assert (ci1 >= ci1).all() + + assert not (ci1 == 1).all() + assert (ci1 == Index(["a", "b"])).all() + assert (ci1 == ci1.values).all() + + # invalid comparisons + with pytest.raises(ValueError, match="Lengths must match"): + ci1 == Index(["a", "b", "c"]) + + msg = "Categoricals can only be compared if 'categories' are the same" + with pytest.raises(TypeError, match=msg): + ci1 == ci2 + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, ordered=False) + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, categories=list("abc")) + + # tests + # make sure that we are testing for category inclusion properly + ci = CategoricalIndex(list("aabca"), categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + # Same categories, but different order + # Unordered + assert ci.equals(CategoricalIndex(list("aabca"))) + # Ordered + assert not ci.equals(CategoricalIndex(list("aabca"), ordered=True)) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + assert not ci.equals(CategoricalIndex(list("aabca"))) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca") + [np.nan]) + assert ci.equals(CategoricalIndex(list("aabca") + [np.nan])) + assert not ci.equals(CategoricalIndex(list("aabca") + [np.nan], ordered=True)) + assert ci.equals(ci.copy()) + + def test_equals_categorical_unordered(self): + # https://github.com/pandas-dev/pandas/issues/16603 + a = CategoricalIndex(["A"], categories=["A", "B"]) + b = CategoricalIndex(["A"], categories=["B", "A"]) + c = CategoricalIndex(["C"], categories=["B", "A"]) + assert a.equals(b) + assert not a.equals(c) + assert not b.equals(c) + + def test_equals_non_category(self): + # GH#37667 Case where other contains a value not among ci's + # categories ("D") and also contains np.nan + ci = CategoricalIndex(["A", "B", np.nan, np.nan]) + other = Index(["A", "B", "D", np.nan]) + + assert not ci.equals(other) + + def test_equals_multiindex(self): + # dont raise NotImplementedError when calling is_dtype_compat + + mi = MultiIndex.from_arrays([["A", "B", "C", "D"], range(4)]) + ci = 
mi.to_flat_index().astype("category") + + assert not ci.equals(mi) + + def test_equals_string_dtype(self, any_string_dtype): + # GH#55364 + idx = CategoricalIndex(list("abc"), name="B") + other = Index(["a", "b", "c"], name="B", dtype=any_string_dtype) + assert idx.equals(other) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_formats.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..522ca1bc2afde451e2d6feff780a973aceb4c39f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_formats.py @@ -0,0 +1,120 @@ +""" +Tests for CategoricalIndex.__repr__ and related methods. +""" +import pytest + +from pandas._config import using_pyarrow_string_dtype +import pandas._config.config as cf + +from pandas import CategoricalIndex +import pandas._testing as tm + + +class TestCategoricalIndexRepr: + def test_format_different_scalar_lengths(self): + # GH#35439 + idx = CategoricalIndex(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + msg = r"CategoricalIndex\.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert idx.format() == expected + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="repr different") + def test_string_categorical_index_repr(self): + # short + idx = CategoricalIndex(["a", "bb", "ccc"]) + expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["a", "bb", "ccc"] * 10) + expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', + 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["a", "bb", "ccc"] * 100) + expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + ... + 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("abcdefghijklmmo")) + expected = """CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', + 'm', 'm', 'o'], + categories=['a', 'b', 'c', 'd', ..., 'k', 'l', 'm', 'o'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # short + idx = CategoricalIndex(["あ", "いい", "ううう"]) + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["あ", "いい", "ううう"] * 10) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["あ", "いい", "ううう"] * 100) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + ... 
+ 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) + expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', + 'す', 'せ', 'そ'], + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # Enable Unicode option ----------------------------------------- + with cf.option_context("display.unicode.east_asian_width", True): + # short + idx = CategoricalIndex(["あ", "いい", "ううう"]) + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["あ", "いい", "ううう"] * 10) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', + 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["あ", "いい", "ううう"] * 100) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', + ... + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', + 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) + expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', + 'さ', 'し', 'す', 'せ', 'そ'], + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_reindex.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_reindex.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1f2b9fb159a6873c83e0a0a4e777913bb99fee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_reindex.py @@ -0,0 +1,78 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + Interval, +) +import pandas._testing as tm + + +class TestReindex: + def test_reindex_list_non_unique(self): + # GH#11586 + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) + + def test_reindex_categorical_non_unique(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(Categorical(["a", "c"])) + + def test_reindex_list_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) + + def test_reindex_categorical_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + with pytest.raises(ValueError, 
match=msg): + ci.reindex(Categorical(["a", "c"])) + + def test_reindex_duplicate_target(self): + # See GH25459 + cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"]) + res, indexer = cat.reindex(["a", "c", "c"]) + exp = Index(["a", "c", "c"]) + tm.assert_index_equal(res, exp, exact=True) + tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp)) + + res, indexer = cat.reindex( + CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"]) + ) + exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"]) + tm.assert_index_equal(res, exp, exact=True) + tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp)) + + def test_reindex_empty_index(self): + # See GH16770 + c = CategoricalIndex([]) + res, indexer = c.reindex(["a", "b"]) + tm.assert_index_equal(res, Index(["a", "b"]), exact=True) + tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp)) + + def test_reindex_categorical_added_category(self): + # GH 42424 + ci = CategoricalIndex( + [Interval(0, 1, closed="right"), Interval(1, 2, closed="right")], + ordered=True, + ) + ci_add = CategoricalIndex( + [ + Interval(0, 1, closed="right"), + Interval(1, 2, closed="right"), + Interval(2, 3, closed="right"), + Interval(3, 4, closed="right"), + ], + ordered=True, + ) + result, _ = ci.reindex(ci_add) + expected = ci_add + tm.assert_index_equal(expected, result) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/test_astype.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..59c555b9644a1230dc60d622fb5fdb80a1743afe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/test_astype.py @@ -0,0 +1,248 @@ +import re + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, +) + +from pandas import ( + CategoricalIndex, + Index, + IntervalIndex, + NaT, + Timedelta, + Timestamp, + interval_range, +) +import pandas._testing as tm + + +class AstypeTests: + """Tests common to IntervalIndex with any subtype""" + + def test_astype_idempotent(self, index): + result = index.astype("interval") + tm.assert_index_equal(result, index) + + result = index.astype(index.dtype) + tm.assert_index_equal(result, index) + + def test_astype_object(self, index): + result = index.astype(object) + expected = Index(index.values, dtype="object") + tm.assert_index_equal(result, expected) + assert not result.equals(index) + + def test_astype_category(self, index): + result = index.astype("category") + expected = CategoricalIndex(index.values) + tm.assert_index_equal(result, expected) + + result = index.astype(CategoricalDtype()) + tm.assert_index_equal(result, expected) + + # non-default params + categories = index.dropna().unique().values[:-1] + dtype = CategoricalDtype(categories=categories, ordered=True) + result = index.astype(dtype) + expected = CategoricalIndex(index.values, categories=categories, ordered=True) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [ + "int64", + "uint64", + "float64", + "complex128", + "period[M]", + 
"timedelta64", + "timedelta64[ns]", + "datetime64", + "datetime64[ns]", + "datetime64[ns, US/Eastern]", + ], + ) + def test_astype_cannot_cast(self, index, dtype): + msg = "Cannot cast IntervalIndex to dtype" + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + def test_astype_invalid_dtype(self, index): + msg = "data type [\"']fake_dtype[\"'] not understood" + with pytest.raises(TypeError, match=msg): + index.astype("fake_dtype") + + +class TestIntSubtype(AstypeTests): + """Tests specific to IntervalIndex with integer-like subtype""" + + indexes = [ + IntervalIndex.from_breaks(np.arange(-10, 11, dtype="int64")), + IntervalIndex.from_breaks(np.arange(100, dtype="uint64"), closed="left"), + ] + + @pytest.fixture(params=indexes) + def index(self, request): + return request.param + + @pytest.mark.parametrize( + "subtype", ["float64", "datetime64[ns]", "timedelta64[ns]"] + ) + def test_subtype_conversion(self, index, subtype): + dtype = IntervalDtype(subtype, index.closed) + result = index.astype(dtype) + expected = IntervalIndex.from_arrays( + index.left.astype(subtype), index.right.astype(subtype), closed=index.closed + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "subtype_start, subtype_end", [("int64", "uint64"), ("uint64", "int64")] + ) + def test_subtype_integer(self, subtype_start, subtype_end): + index = IntervalIndex.from_breaks(np.arange(100, dtype=subtype_start)) + dtype = IntervalDtype(subtype_end, index.closed) + result = index.astype(dtype) + expected = IntervalIndex.from_arrays( + index.left.astype(subtype_end), + index.right.astype(subtype_end), + closed=index.closed, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.xfail(reason="GH#15832") + def test_subtype_integer_errors(self): + # int64 -> uint64 fails with negative values + index = interval_range(-10, 10) + dtype = IntervalDtype("uint64", "right") + + # Until we decide what the exception message _should_ be, we + # assert something that it should _not_ be. 
+ # We should _not_ be getting a message suggesting that the -10 + # has been wrapped around to a large-positive integer + msg = "^(?!(left side of interval must be <= right side))" + with pytest.raises(ValueError, match=msg): + index.astype(dtype) + + +class TestFloatSubtype(AstypeTests): + """Tests specific to IntervalIndex with float subtype""" + + indexes = [ + interval_range(-10.0, 10.0, closed="neither"), + IntervalIndex.from_arrays( + [-1.5, np.nan, 0.0, 0.0, 1.5], [-0.5, np.nan, 1.0, 1.0, 3.0], closed="both" + ), + ] + + @pytest.fixture(params=indexes) + def index(self, request): + return request.param + + @pytest.mark.parametrize("subtype", ["int64", "uint64"]) + def test_subtype_integer(self, subtype): + index = interval_range(0.0, 10.0) + dtype = IntervalDtype(subtype, "right") + result = index.astype(dtype) + expected = IntervalIndex.from_arrays( + index.left.astype(subtype), index.right.astype(subtype), closed=index.closed + ) + tm.assert_index_equal(result, expected) + + # raises with NA + msg = r"Cannot convert non-finite values \(NA or inf\) to integer" + with pytest.raises(ValueError, match=msg): + index.insert(0, np.nan).astype(dtype) + + @pytest.mark.parametrize("subtype", ["int64", "uint64"]) + def test_subtype_integer_with_non_integer_borders(self, subtype): + index = interval_range(0.0, 3.0, freq=0.25) + dtype = IntervalDtype(subtype, "right") + result = index.astype(dtype) + expected = IntervalIndex.from_arrays( + index.left.astype(subtype), index.right.astype(subtype), closed=index.closed + ) + tm.assert_index_equal(result, expected) + + def test_subtype_integer_errors(self): + # float64 -> uint64 fails with negative values + index = interval_range(-10.0, 10.0) + dtype = IntervalDtype("uint64", "right") + msg = re.escape( + "Cannot convert interval[float64, right] to interval[uint64, right]; " + "subtypes are incompatible" + ) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + @pytest.mark.parametrize("subtype", ["datetime64[ns]", "timedelta64[ns]"]) + def test_subtype_datetimelike(self, index, subtype): + dtype = IntervalDtype(subtype, "right") + msg = "Cannot convert .* to .*; subtypes are incompatible" + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + +class TestDatetimelikeSubtype(AstypeTests): + """Tests specific to IntervalIndex with datetime-like subtype""" + + indexes = [ + interval_range(Timestamp("2018-01-01"), periods=10, closed="neither"), + interval_range(Timestamp("2018-01-01"), periods=10).insert(2, NaT), + interval_range(Timestamp("2018-01-01", tz="US/Eastern"), periods=10), + interval_range(Timedelta("0 days"), periods=10, closed="both"), + interval_range(Timedelta("0 days"), periods=10).insert(2, NaT), + ] + + @pytest.fixture(params=indexes) + def index(self, request): + return request.param + + @pytest.mark.parametrize("subtype", ["int64", "uint64"]) + def test_subtype_integer(self, index, subtype): + dtype = IntervalDtype(subtype, "right") + + if subtype != "int64": + msg = ( + r"Cannot convert interval\[(timedelta64|datetime64)\[ns.*\], .*\] " + r"to interval\[uint64, .*\]" + ) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + return + + result = index.astype(dtype) + new_left = index.left.astype(subtype) + new_right = index.right.astype(subtype) + + expected = IntervalIndex.from_arrays(new_left, new_right, closed=index.closed) + tm.assert_index_equal(result, expected) + + def test_subtype_float(self, index): + dtype = IntervalDtype("float64", "right") + msg = "Cannot convert .* to .*; 
subtypes are incompatible" + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + def test_subtype_datetimelike(self): + # datetime -> timedelta raises + dtype = IntervalDtype("timedelta64[ns]", "right") + msg = "Cannot convert .* to .*; subtypes are incompatible" + + index = interval_range(Timestamp("2018-01-01"), periods=10) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + index = interval_range(Timestamp("2018-01-01", tz="CET"), periods=10) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) + + # timedelta -> datetime raises + dtype = IntervalDtype("datetime64[ns]", "right") + index = interval_range(Timedelta("0 days"), periods=10) + with pytest.raises(TypeError, match=msg): + index.astype(dtype) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/test_pickle.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/test_pickle.py new file mode 100644 index 0000000000000000000000000000000000000000..308a90e72eab5db55f300341212d2c04e82c6900 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/test_pickle.py @@ -0,0 +1,13 @@ +import pytest + +from pandas import IntervalIndex +import pandas._testing as tm + + +class TestPickle: + @pytest.mark.parametrize("closed", ["left", "right", "both"]) + def test_pickle_round_trip_closed(self, closed): + # https://github.com/pandas-dev/pandas/issues/35658 + idx = IntervalIndex.from_tuples([(1, 2), (2, 3)], closed=closed) + result = tm.round_trip_pickle(idx) + tm.assert_index_equal(result, idx) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/test_setops.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/test_setops.py new file mode 100644 index 0000000000000000000000000000000000000000..1b0816a9405cb9dd6ed81691e72012c948b898a2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/interval/test_setops.py @@ -0,0 +1,208 @@ +import numpy as np +import pytest + +from pandas import ( + Index, + IntervalIndex, + Timestamp, + interval_range, +) +import pandas._testing as tm + + +def monotonic_index(start, end, dtype="int64", closed="right"): + return IntervalIndex.from_breaks(np.arange(start, end, dtype=dtype), closed=closed) + + +def empty_index(dtype="int64", closed="right"): + return IntervalIndex(np.array([], dtype=dtype), closed=closed) + + +class TestIntervalIndex: + def test_union(self, closed, sort): + index = monotonic_index(0, 11, closed=closed) + other = monotonic_index(5, 13, closed=closed) + + expected = monotonic_index(0, 13, closed=closed) + result = index[::-1].union(other, sort=sort) + if sort in (None, True): + tm.assert_index_equal(result, expected) + else: + tm.assert_index_equal(result.sort_values(), expected) + + result = other[::-1].union(index, sort=sort) + if sort in (None, True): + tm.assert_index_equal(result, expected) + else: + tm.assert_index_equal(result.sort_values(), expected) + + tm.assert_index_equal(index.union(index, sort=sort), index) + tm.assert_index_equal(index.union(index[:1], sort=sort), index) + + def test_union_empty_result(self, closed, sort): + # GH 19101: empty result, same dtype + index = empty_index(dtype="int64", closed=closed) + result = index.union(index, sort=sort) + tm.assert_index_equal(result, index) + + # GH 19101: empty result, different numeric dtypes -> common dtype is f8 + other = empty_index(dtype="float64", closed=closed) + result = index.union(other, sort=sort) + expected = 
other + tm.assert_index_equal(result, expected) + + other = index.union(index, sort=sort) + tm.assert_index_equal(result, expected) + + other = empty_index(dtype="uint64", closed=closed) + result = index.union(other, sort=sort) + tm.assert_index_equal(result, expected) + + result = other.union(index, sort=sort) + tm.assert_index_equal(result, expected) + + def test_intersection(self, closed, sort): + index = monotonic_index(0, 11, closed=closed) + other = monotonic_index(5, 13, closed=closed) + + expected = monotonic_index(5, 11, closed=closed) + result = index[::-1].intersection(other, sort=sort) + if sort in (None, True): + tm.assert_index_equal(result, expected) + else: + tm.assert_index_equal(result.sort_values(), expected) + + result = other[::-1].intersection(index, sort=sort) + if sort in (None, True): + tm.assert_index_equal(result, expected) + else: + tm.assert_index_equal(result.sort_values(), expected) + + tm.assert_index_equal(index.intersection(index, sort=sort), index) + + # GH 26225: nested intervals + index = IntervalIndex.from_tuples([(1, 2), (1, 3), (1, 4), (0, 2)]) + other = IntervalIndex.from_tuples([(1, 2), (1, 3)]) + expected = IntervalIndex.from_tuples([(1, 2), (1, 3)]) + result = index.intersection(other) + tm.assert_index_equal(result, expected) + + # GH 26225 + index = IntervalIndex.from_tuples([(0, 3), (0, 2)]) + other = IntervalIndex.from_tuples([(0, 2), (1, 3)]) + expected = IntervalIndex.from_tuples([(0, 2)]) + result = index.intersection(other) + tm.assert_index_equal(result, expected) + + # GH 26225: duplicate nan element + index = IntervalIndex([np.nan, np.nan]) + other = IntervalIndex([np.nan]) + expected = IntervalIndex([np.nan]) + result = index.intersection(other) + tm.assert_index_equal(result, expected) + + def test_intersection_empty_result(self, closed, sort): + index = monotonic_index(0, 11, closed=closed) + + # GH 19101: empty result, same dtype + other = monotonic_index(300, 314, closed=closed) + expected = empty_index(dtype="int64", closed=closed) + result = index.intersection(other, sort=sort) + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, different numeric dtypes -> common dtype is float64 + other = monotonic_index(300, 314, dtype="float64", closed=closed) + result = index.intersection(other, sort=sort) + expected = other[:0] + tm.assert_index_equal(result, expected) + + other = monotonic_index(300, 314, dtype="uint64", closed=closed) + result = index.intersection(other, sort=sort) + tm.assert_index_equal(result, expected) + + def test_intersection_duplicates(self): + # GH#38743 + index = IntervalIndex.from_tuples([(1, 2), (1, 2), (2, 3), (3, 4)]) + other = IntervalIndex.from_tuples([(1, 2), (2, 3)]) + expected = IntervalIndex.from_tuples([(1, 2), (2, 3)]) + result = index.intersection(other) + tm.assert_index_equal(result, expected) + + def test_difference(self, closed, sort): + index = IntervalIndex.from_arrays([1, 0, 3, 2], [1, 2, 3, 4], closed=closed) + result = index.difference(index[:1], sort=sort) + expected = index[1:] + if sort is None: + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, same dtype + result = index.difference(index, sort=sort) + expected = empty_index(dtype="int64", closed=closed) + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, different dtypes + other = IntervalIndex.from_arrays( + index.left.astype("float64"), index.right, closed=closed + ) + result = index.difference(other, sort=sort) + 
tm.assert_index_equal(result, expected) + + def test_symmetric_difference(self, closed, sort): + index = monotonic_index(0, 11, closed=closed) + result = index[1:].symmetric_difference(index[:-1], sort=sort) + expected = IntervalIndex([index[0], index[-1]]) + if sort in (None, True): + tm.assert_index_equal(result, expected) + else: + tm.assert_index_equal(result.sort_values(), expected) + + # GH 19101: empty result, same dtype + result = index.symmetric_difference(index, sort=sort) + expected = empty_index(dtype="int64", closed=closed) + if sort in (None, True): + tm.assert_index_equal(result, expected) + else: + tm.assert_index_equal(result.sort_values(), expected) + + # GH 19101: empty result, different dtypes + other = IntervalIndex.from_arrays( + index.left.astype("float64"), index.right, closed=closed + ) + result = index.symmetric_difference(other, sort=sort) + expected = empty_index(dtype="float64", closed=closed) + tm.assert_index_equal(result, expected) + + @pytest.mark.filterwarnings("ignore:'<' not supported between:RuntimeWarning") + @pytest.mark.parametrize( + "op_name", ["union", "intersection", "difference", "symmetric_difference"] + ) + def test_set_incompatible_types(self, closed, op_name, sort): + index = monotonic_index(0, 11, closed=closed) + set_op = getattr(index, op_name) + + # TODO: standardize return type of non-union setops type(self vs other) + # non-IntervalIndex + if op_name == "difference": + expected = index + else: + expected = getattr(index.astype("O"), op_name)(Index([1, 2, 3])) + result = set_op(Index([1, 2, 3]), sort=sort) + tm.assert_index_equal(result, expected) + + # mixed closed -> cast to object + for other_closed in {"right", "left", "both", "neither"} - {closed}: + other = monotonic_index(0, 11, closed=other_closed) + expected = getattr(index.astype(object), op_name)(other, sort=sort) + if op_name == "difference": + expected = index + result = set_op(other, sort=sort) + tm.assert_index_equal(result, expected) + + # GH 19016: incompatible dtypes -> cast to object + other = interval_range(Timestamp("20180101"), periods=9, closed=closed) + expected = getattr(index.astype(object), op_name)(other, sort=sort) + if op_name == "difference": + expected = index + result = set_op(other, sort=sort) + tm.assert_index_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50a42a9dbefdac72a8379529b88c5c87eb83c507 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/conftest.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67d1bd2c208317da5d7dd684ccdc73e74cfc1148 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/conftest.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_analytics.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_analytics.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a7c161631c67a6e333d669c7079f4dbd1cf62c85 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_analytics.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_astype.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecb1126df86307fa11db47d4f7fa3fcc2ccec9b3 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_astype.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_compat.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..634e9c0892d8ba73c05742110a068fb005d00847 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_compat.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_constructors.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2156803e439f89cc85a0ef6dbb0cb29b4a7dbda5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_conversion.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9f264b2aa5f52a15b4524af420701dc711bcb3d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_conversion.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_copy.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_copy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42ecec6fcf4ac57b69035d7a11dfe35758693e7f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_copy.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_drop.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_drop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b60658192083ed5c8566fa3568b924a9efcdb70 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_drop.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_duplicates.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_duplicates.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10b4312956d08f92229aa1e5134213d0f1d42371 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_duplicates.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_equivalence.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_equivalence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22994df7d6b18737db80d1a2423a3dbb66c7f319 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_equivalence.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_formats.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_formats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5031911fd525b935d6452111ed403a27e47defb4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_formats.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_get_level_values.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_get_level_values.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc828a50dbe11dead5b070954485aa137ca22410 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_get_level_values.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_get_set.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_get_set.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b63850bd17e2f9cf0ebb6287f3db9cd4323a231 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_get_set.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_indexing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2df544351a8fd7f1462c7a655769507c75be4a25 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_indexing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_integrity.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_integrity.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c9d25db82319350e502c84abd0dd48a0cc8a3c7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_integrity.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_isin.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_isin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1155aae97122e4b5110ad3462ef928a1a426d3aa Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_isin.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_join.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_join.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d16430e1cdac24305143b0df9f59f5104d9b04aa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_join.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_lexsort.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_lexsort.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cbe2751b0e61a197a50c6a20e2e867875d99438 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_lexsort.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_missing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_missing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..953dfac33e6ca03fc3f5314ff01d9ae3d089763a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_missing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_monotonic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_monotonic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07df0c13b5b9a8a0693f9c6cc1cb9114f600d0a8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_monotonic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_names.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_names.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..069fa6658881c5ce1b7dfe41ce20a7d9929c8d10 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_names.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_partial_indexing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_partial_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e107c075232875b79421400adb6d30817f57a0fb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_partial_indexing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_pickle.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_pickle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad7b51b6c1bace033243b118760273aea6148142 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_pickle.cpython-310.pyc differ diff 
--git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_reindex.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_reindex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84c71f03c629a58150c6d5dbc5215f7685bf82a8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_reindex.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_reshape.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_reshape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7727b8f394d8baf7da337911d9b6030e303391e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_reshape.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_setops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_setops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf23206857f10b9f5c39f0908742ba227ea361bc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_setops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_sorting.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_sorting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1992a54b4c8aa51994df01936a896c1e342fbd14 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_sorting.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_take.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_take.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ecff0d5f3600ba771f02d27db4de588dd4252b5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/__pycache__/test_take.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_constructors.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..8456e6a7acba5a160556f6f3192fa795ce678786 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_constructors.py @@ -0,0 +1,860 @@ +from datetime import ( + date, + datetime, +) +import itertools + +import numpy as np +import pytest + +from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike + +import pandas as pd +from pandas import ( + Index, + MultiIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +def test_constructor_single_level(): + result = MultiIndex( + levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"] + ) + assert isinstance(result, MultiIndex) + expected = Index(["foo", "bar", "baz", "qux"], name="first") + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["first"] + + +def test_constructor_no_levels(): + msg 
= "non-zero number of levels/codes" + with pytest.raises(ValueError, match=msg): + MultiIndex(levels=[], codes=[]) + + msg = "Must pass both levels and codes" + with pytest.raises(TypeError, match=msg): + MultiIndex(levels=[]) + with pytest.raises(TypeError, match=msg): + MultiIndex(codes=[]) + + +def test_constructor_nonhashable_names(): + # GH 20527 + levels = [[1, 2], ["one", "two"]] + codes = [[0, 0, 1, 1], [0, 1, 0, 1]] + names = (["foo"], ["bar"]) + msg = r"MultiIndex\.name must be a hashable type" + with pytest.raises(TypeError, match=msg): + MultiIndex(levels=levels, codes=codes, names=names) + + # With .rename() + mi = MultiIndex( + levels=[[1, 2], ["one", "two"]], + codes=[[0, 0, 1, 1], [0, 1, 0, 1]], + names=("foo", "bar"), + ) + renamed = [["fooo"], ["barr"]] + with pytest.raises(TypeError, match=msg): + mi.rename(names=renamed) + + # With .set_names() + with pytest.raises(TypeError, match=msg): + mi.set_names(names=renamed) + + +def test_constructor_mismatched_codes_levels(idx): + codes = [np.array([1]), np.array([2]), np.array([3])] + levels = ["a"] + + msg = "Length of levels and codes must be the same" + with pytest.raises(ValueError, match=msg): + MultiIndex(levels=levels, codes=codes) + + length_error = ( + r"On level 0, code max \(3\) >= length of level \(1\)\. " + "NOTE: this index is in an inconsistent state" + ) + label_error = r"Unequal code lengths: \[4, 2\]" + code_value_error = r"On level 0, code value \(-2\) < -1" + + # important to check that it's looking at the right thing. + with pytest.raises(ValueError, match=length_error): + MultiIndex(levels=[["a"], ["b"]], codes=[[0, 1, 2, 3], [0, 3, 4, 1]]) + + with pytest.raises(ValueError, match=label_error): + MultiIndex(levels=[["a"], ["b"]], codes=[[0, 0, 0, 0], [0, 0]]) + + # external API + with pytest.raises(ValueError, match=length_error): + idx.copy().set_levels([["a"], ["b"]]) + + with pytest.raises(ValueError, match=label_error): + idx.copy().set_codes([[0, 0, 0, 0], [0, 0]]) + + # test set_codes with verify_integrity=False + # the setting should not raise any value error + idx.copy().set_codes(codes=[[0, 0, 0, 0], [0, 0]], verify_integrity=False) + + # code value smaller than -1 + with pytest.raises(ValueError, match=code_value_error): + MultiIndex(levels=[["a"], ["b"]], codes=[[0, -2], [0, 0]]) + + +def test_na_levels(): + # GH26408 + # test if codes are re-assigned value -1 for levels + # with missing values (NaN, NaT, None) + result = MultiIndex( + levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[0, -1, 1, 2, 3, 4]] + ) + expected = MultiIndex( + levels=[[np.nan, None, pd.NaT, 128, 2]], codes=[[-1, -1, -1, -1, 3, 4]] + ) + tm.assert_index_equal(result, expected) + + result = MultiIndex( + levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[0, -1, 1, 2, 3, 4]] + ) + expected = MultiIndex( + levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[-1, -1, 1, -1, 3, -1]] + ) + tm.assert_index_equal(result, expected) + + # verify set_levels and set_codes + result = MultiIndex( + levels=[[1, 2, 3, 4, 5]], codes=[[0, -1, 1, 2, 3, 4]] + ).set_levels([[np.nan, "s", pd.NaT, 128, None]]) + tm.assert_index_equal(result, expected) + + result = MultiIndex( + levels=[[np.nan, "s", pd.NaT, 128, None]], codes=[[1, 2, 2, 2, 2, 2]] + ).set_codes([[0, -1, 1, 2, 3, 4]]) + tm.assert_index_equal(result, expected) + + +def test_copy_in_constructor(): + levels = np.array(["a", "b", "c"]) + codes = np.array([1, 1, 2, 0, 0, 1, 1]) + val = codes[0] + mi = MultiIndex(levels=[levels, levels], codes=[codes, codes], copy=True) + assert 
mi.codes[0][0] == val + codes[0] = 15 + assert mi.codes[0][0] == val + val = levels[0] + levels[0] = "PANDA" + assert mi.levels[0][0] == val + + +# ---------------------------------------------------------------------------- +# from_arrays +# ---------------------------------------------------------------------------- +def test_from_arrays(idx): + arrays = [ + np.asarray(lev).take(level_codes) + for lev, level_codes in zip(idx.levels, idx.codes) + ] + + # list of arrays as input + result = MultiIndex.from_arrays(arrays, names=idx.names) + tm.assert_index_equal(result, idx) + + # infer correctly + result = MultiIndex.from_arrays([[pd.NaT, Timestamp("20130101")], ["a", "b"]]) + assert result.levels[0].equals(Index([Timestamp("20130101")])) + assert result.levels[1].equals(Index(["a", "b"])) + + +def test_from_arrays_iterator(idx): + # GH 18434 + arrays = [ + np.asarray(lev).take(level_codes) + for lev, level_codes in zip(idx.levels, idx.codes) + ] + + # iterator as input + result = MultiIndex.from_arrays(iter(arrays), names=idx.names) + tm.assert_index_equal(result, idx) + + # invalid iterator input + msg = "Input must be a list / sequence of array-likes." + with pytest.raises(TypeError, match=msg): + MultiIndex.from_arrays(0) + + +def test_from_arrays_tuples(idx): + arrays = tuple( + tuple(np.asarray(lev).take(level_codes)) + for lev, level_codes in zip(idx.levels, idx.codes) + ) + + # tuple of tuples as input + result = MultiIndex.from_arrays(arrays, names=idx.names) + tm.assert_index_equal(result, idx) + + +@pytest.mark.parametrize( + ("idx1", "idx2"), + [ + ( + pd.period_range("2011-01-01", freq="D", periods=3), + pd.period_range("2015-01-01", freq="h", periods=3), + ), + ( + date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"), + date_range("2015-01-01 10:00", freq="h", periods=3, tz="Asia/Tokyo"), + ), + ( + pd.timedelta_range("1 days", freq="D", periods=3), + pd.timedelta_range("2 hours", freq="h", periods=3), + ), + ], +) +def test_from_arrays_index_series_period_datetimetz_and_timedelta(idx1, idx2): + result = MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + tm.assert_index_equal(result, result2) + + +def test_from_arrays_index_datetimelike_mixed(): + idx1 = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern") + idx2 = date_range("2015-01-01 10:00", freq="h", periods=3) + idx3 = pd.timedelta_range("1 days", freq="D", periods=3) + idx4 = pd.period_range("2011-01-01", freq="D", periods=3) + + result = MultiIndex.from_arrays([idx1, idx2, idx3, idx4]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + tm.assert_index_equal(result.get_level_values(2), idx3) + tm.assert_index_equal(result.get_level_values(3), idx4) + + result2 = MultiIndex.from_arrays( + [Series(idx1), Series(idx2), Series(idx3), Series(idx4)] + ) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + tm.assert_index_equal(result2.get_level_values(2), idx3) + tm.assert_index_equal(result2.get_level_values(3), idx4) + + tm.assert_index_equal(result, result2) + + +def test_from_arrays_index_series_categorical(): + # GH13743 + idx1 = pd.CategoricalIndex(list("abcaab"), 
categories=list("bac"), ordered=False) + idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=True) + + result = MultiIndex.from_arrays([idx1, idx2]) + tm.assert_index_equal(result.get_level_values(0), idx1) + tm.assert_index_equal(result.get_level_values(1), idx2) + + result2 = MultiIndex.from_arrays([Series(idx1), Series(idx2)]) + tm.assert_index_equal(result2.get_level_values(0), idx1) + tm.assert_index_equal(result2.get_level_values(1), idx2) + + result3 = MultiIndex.from_arrays([idx1.values, idx2.values]) + tm.assert_index_equal(result3.get_level_values(0), idx1) + tm.assert_index_equal(result3.get_level_values(1), idx2) + + +def test_from_arrays_empty(): + # 0 levels + msg = "Must pass non-zero number of levels/codes" + with pytest.raises(ValueError, match=msg): + MultiIndex.from_arrays(arrays=[]) + + # 1 level + result = MultiIndex.from_arrays(arrays=[[]], names=["A"]) + assert isinstance(result, MultiIndex) + expected = Index([], name="A") + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["A"] + + # N levels + for N in [2, 3]: + arrays = [[]] * N + names = list("ABC")[:N] + result = MultiIndex.from_arrays(arrays=arrays, names=names) + expected = MultiIndex(levels=[[]] * N, codes=[[]] * N, names=names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "invalid_sequence_of_arrays", + [ + 1, + [1], + [1, 2], + [[1], 2], + [1, [2]], + "a", + ["a"], + ["a", "b"], + [["a"], "b"], + (1,), + (1, 2), + ([1], 2), + (1, [2]), + "a", + ("a",), + ("a", "b"), + (["a"], "b"), + [(1,), 2], + [1, (2,)], + [("a",), "b"], + ((1,), 2), + (1, (2,)), + (("a",), "b"), + ], +) +def test_from_arrays_invalid_input(invalid_sequence_of_arrays): + msg = "Input must be a list / sequence of array-likes" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_arrays(arrays=invalid_sequence_of_arrays) + + +@pytest.mark.parametrize( + "idx1, idx2", [([1, 2, 3], ["a", "b"]), ([], ["a", "b"]), ([1, 2, 3], [])] +) +def test_from_arrays_different_lengths(idx1, idx2): + # see gh-13599 + msg = "^all arrays must be same length$" + with pytest.raises(ValueError, match=msg): + MultiIndex.from_arrays([idx1, idx2]) + + +def test_from_arrays_respects_none_names(): + # GH27292 + a = Series([1, 2, 3], name="foo") + b = Series(["a", "b", "c"], name="bar") + + result = MultiIndex.from_arrays([a, b], names=None) + expected = MultiIndex( + levels=[[1, 2, 3], ["a", "b", "c"]], codes=[[0, 1, 2], [0, 1, 2]], names=None + ) + + tm.assert_index_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# from_tuples +# ---------------------------------------------------------------------------- +def test_from_tuples(): + msg = "Cannot infer number of levels from empty list" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_tuples([]) + + expected = MultiIndex( + levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"] + ) + + # input tuples + result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=["a", "b"]) + tm.assert_index_equal(result, expected) + + +def test_from_tuples_iterator(): + # GH 18434 + # input iterator for tuples + expected = MultiIndex( + levels=[[1, 3], [2, 4]], codes=[[0, 1], [0, 1]], names=["a", "b"] + ) + + result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=["a", "b"]) + tm.assert_index_equal(result, expected) + + # input non-iterables + msg = "Input must be a list / sequence of tuple-likes." 
+ with pytest.raises(TypeError, match=msg): + MultiIndex.from_tuples(0) + + +def test_from_tuples_empty(): + # GH 16777 + result = MultiIndex.from_tuples([], names=["a", "b"]) + expected = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"]) + tm.assert_index_equal(result, expected) + + +def test_from_tuples_index_values(idx): + result = MultiIndex.from_tuples(idx) + assert (result.values == idx.values).all() + + +def test_tuples_with_name_string(): + # GH 15110 and GH 14848 + + li = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] + msg = "Names should be list-like for a MultiIndex" + with pytest.raises(ValueError, match=msg): + Index(li, name="abc") + with pytest.raises(ValueError, match=msg): + Index(li, name="a") + + +def test_from_tuples_with_tuple_label(): + # GH 15457 + expected = pd.DataFrame( + [[2, 1, 2], [4, (1, 2), 3]], columns=["a", "b", "c"] + ).set_index(["a", "b"]) + idx = MultiIndex.from_tuples([(2, 1), (4, (1, 2))], names=("a", "b")) + result = pd.DataFrame([2, 3], columns=["c"], index=idx) + tm.assert_frame_equal(expected, result) + + +# ---------------------------------------------------------------------------- +# from_product +# ---------------------------------------------------------------------------- +def test_from_product_empty_zero_levels(): + # 0 levels + msg = "Must pass non-zero number of levels/codes" + with pytest.raises(ValueError, match=msg): + MultiIndex.from_product([]) + + +def test_from_product_empty_one_level(): + result = MultiIndex.from_product([[]], names=["A"]) + expected = Index([], name="A") + tm.assert_index_equal(result.levels[0], expected) + assert result.names == ["A"] + + +@pytest.mark.parametrize( + "first, second", [([], []), (["foo", "bar", "baz"], []), ([], ["a", "b", "c"])] +) +def test_from_product_empty_two_levels(first, second): + names = ["A", "B"] + result = MultiIndex.from_product([first, second], names=names) + expected = MultiIndex(levels=[first, second], codes=[[], []], names=names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("N", list(range(4))) +def test_from_product_empty_three_levels(N): + # GH12258 + names = ["A", "B", "C"] + lvl2 = list(range(N)) + result = MultiIndex.from_product([[], lvl2, []], names=names) + expected = MultiIndex(levels=[[], lvl2, []], codes=[[], [], []], names=names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "invalid_input", [1, [1], [1, 2], [[1], 2], "a", ["a"], ["a", "b"], [["a"], "b"]] +) +def test_from_product_invalid_input(invalid_input): + msg = r"Input must be a list / sequence of iterables|Input must be list-like" + with pytest.raises(TypeError, match=msg): + MultiIndex.from_product(iterables=invalid_input) + + +def test_from_product_datetimeindex(): + dt_index = date_range("2000-01-01", periods=2) + mi = MultiIndex.from_product([[1, 2], dt_index]) + etalon = construct_1d_object_array_from_listlike( + [ + (1, Timestamp("2000-01-01")), + (1, Timestamp("2000-01-02")), + (2, Timestamp("2000-01-01")), + (2, Timestamp("2000-01-02")), + ] + ) + tm.assert_numpy_array_equal(mi.values, etalon) + + +def test_from_product_rangeindex(): + # RangeIndex is preserved by factorize, so preserved in levels + rng = Index(range(5)) + other = ["a", "b"] + mi = MultiIndex.from_product([rng, other]) + tm.assert_index_equal(mi._levels[0], rng, exact=True) + + +@pytest.mark.parametrize("ordered", [False, True]) +@pytest.mark.parametrize("f", [lambda x: x, lambda x: Series(x), lambda x: x.values]) +def test_from_product_index_series_categorical(ordered, f): + # 
GH13743 + first = ["foo", "bar"] + + idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), ordered=ordered) + expected = pd.CategoricalIndex( + list("abcaab") + list("abcaab"), categories=list("bac"), ordered=ordered + ) + + result = MultiIndex.from_product([first, f(idx)]) + tm.assert_index_equal(result.get_level_values(1), expected) + + +def test_from_product(): + first = ["foo", "bar", "buz"] + second = ["a", "b", "c"] + names = ["first", "second"] + result = MultiIndex.from_product([first, second], names=names) + + tuples = [ + ("foo", "a"), + ("foo", "b"), + ("foo", "c"), + ("bar", "a"), + ("bar", "b"), + ("bar", "c"), + ("buz", "a"), + ("buz", "b"), + ("buz", "c"), + ] + expected = MultiIndex.from_tuples(tuples, names=names) + + tm.assert_index_equal(result, expected) + + +def test_from_product_iterator(): + # GH 18434 + first = ["foo", "bar", "buz"] + second = ["a", "b", "c"] + names = ["first", "second"] + tuples = [ + ("foo", "a"), + ("foo", "b"), + ("foo", "c"), + ("bar", "a"), + ("bar", "b"), + ("bar", "c"), + ("buz", "a"), + ("buz", "b"), + ("buz", "c"), + ] + expected = MultiIndex.from_tuples(tuples, names=names) + + # iterator as input + result = MultiIndex.from_product(iter([first, second]), names=names) + tm.assert_index_equal(result, expected) + + # Invalid non-iterable input + msg = "Input must be a list / sequence of iterables." + with pytest.raises(TypeError, match=msg): + MultiIndex.from_product(0) + + +@pytest.mark.parametrize( + "a, b, expected_names", + [ + ( + Series([1, 2, 3], name="foo"), + Series(["a", "b"], name="bar"), + ["foo", "bar"], + ), + (Series([1, 2, 3], name="foo"), ["a", "b"], ["foo", None]), + ([1, 2, 3], ["a", "b"], None), + ], +) +def test_from_product_infer_names(a, b, expected_names): + # GH27292 + result = MultiIndex.from_product([a, b]) + expected = MultiIndex( + levels=[[1, 2, 3], ["a", "b"]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + names=expected_names, + ) + tm.assert_index_equal(result, expected) + + +def test_from_product_respects_none_names(): + # GH27292 + a = Series([1, 2, 3], name="foo") + b = Series(["a", "b"], name="bar") + + result = MultiIndex.from_product([a, b], names=None) + expected = MultiIndex( + levels=[[1, 2, 3], ["a", "b"]], + codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], + names=None, + ) + tm.assert_index_equal(result, expected) + + +def test_from_product_readonly(): + # GH#15286 passing read-only array to from_product + a = np.array(range(3)) + b = ["a", "b"] + expected = MultiIndex.from_product([a, b]) + + a.setflags(write=False) + result = MultiIndex.from_product([a, b]) + tm.assert_index_equal(result, expected) + + +def test_create_index_existing_name(idx): + # GH11193, when an existing index is passed, and a new name is not + # specified, the new index should inherit the previous object name + index = idx + index.names = ["foo", "bar"] + result = Index(index) + expected = Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ) + ) + tm.assert_index_equal(result, expected) + + result = Index(index, name="A") + expected = Index( + Index( + [ + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ], + dtype="object", + ), + name="A", + ) + tm.assert_index_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# from_frame +# 
---------------------------------------------------------------------------- +def test_from_frame(): + # GH 22420 + df = pd.DataFrame( + [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], columns=["L1", "L2"] + ) + expected = MultiIndex.from_tuples( + [("a", "a"), ("a", "b"), ("b", "a"), ("b", "b")], names=["L1", "L2"] + ) + result = MultiIndex.from_frame(df) + tm.assert_index_equal(expected, result) + + +def test_from_frame_missing_values_multiIndex(): + # GH 39984 + pa = pytest.importorskip("pyarrow") + + df = pd.DataFrame( + { + "a": Series([1, 2, None], dtype="Int64"), + "b": pd.Float64Dtype().__from_arrow__(pa.array([0.2, np.nan, None])), + } + ) + multi_indexed = MultiIndex.from_frame(df) + expected = MultiIndex.from_arrays( + [ + Series([1, 2, None]).astype("Int64"), + pd.Float64Dtype().__from_arrow__(pa.array([0.2, np.nan, None])), + ], + names=["a", "b"], + ) + tm.assert_index_equal(multi_indexed, expected) + + +@pytest.mark.parametrize( + "non_frame", + [ + Series([1, 2, 3, 4]), + [1, 2, 3, 4], + [[1, 2], [3, 4], [5, 6]], + Index([1, 2, 3, 4]), + np.array([[1, 2], [3, 4], [5, 6]]), + 27, + ], +) +def test_from_frame_error(non_frame): + # GH 22420 + with pytest.raises(TypeError, match="Input must be a DataFrame"): + MultiIndex.from_frame(non_frame) + + +def test_from_frame_dtype_fidelity(): + # GH 22420 + df = pd.DataFrame( + { + "dates": date_range("19910905", periods=6, tz="US/Eastern"), + "a": [1, 1, 1, 2, 2, 2], + "b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + "c": ["x", "x", "y", "z", "x", "y"], + } + ) + original_dtypes = df.dtypes.to_dict() + + expected_mi = MultiIndex.from_arrays( + [ + date_range("19910905", periods=6, tz="US/Eastern"), + [1, 1, 1, 2, 2, 2], + pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + ["x", "x", "y", "z", "x", "y"], + ], + names=["dates", "a", "b", "c"], + ) + mi = MultiIndex.from_frame(df) + mi_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)} + + tm.assert_index_equal(expected_mi, mi) + assert original_dtypes == mi_dtypes + + +@pytest.mark.parametrize( + "names_in,names_out", [(None, [("L1", "x"), ("L2", "y")]), (["x", "y"], ["x", "y"])] +) +def test_from_frame_valid_names(names_in, names_out): + # GH 22420 + df = pd.DataFrame( + [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], + columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]), + ) + mi = MultiIndex.from_frame(df, names=names_in) + assert mi.names == names_out + + +@pytest.mark.parametrize( + "names,expected_error_msg", + [ + ("bad_input", "Names should be list-like for a MultiIndex"), + (["a", "b", "c"], "Length of names must match number of levels in MultiIndex"), + ], +) +def test_from_frame_invalid_names(names, expected_error_msg): + # GH 22420 + df = pd.DataFrame( + [["a", "a"], ["a", "b"], ["b", "a"], ["b", "b"]], + columns=MultiIndex.from_tuples([("L1", "x"), ("L2", "y")]), + ) + with pytest.raises(ValueError, match=expected_error_msg): + MultiIndex.from_frame(df, names=names) + + +def test_index_equal_empty_iterable(): + # #16844 + a = MultiIndex(levels=[[], []], codes=[[], []], names=["a", "b"]) + b = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"]) + tm.assert_index_equal(a, b) + + +def test_raise_invalid_sortorder(): + # Test that the MultiIndex constructor raise when a incorrect sortorder is given + # GH#28518 + + levels = [[0, 1], [0, 1, 2]] + + # Correct sortorder + MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2 + ) + + with pytest.raises(ValueError, match=r".* 
sortorder 2 with lexsort_depth 1.*"): + MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2 + ) + + with pytest.raises(ValueError, match=r".* sortorder 1 with lexsort_depth 0.*"): + MultiIndex( + levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1 + ) + + +def test_datetimeindex(): + idx1 = pd.DatetimeIndex( + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo" + ) + idx2 = date_range("2010/01/01", periods=6, freq="ME", tz="US/Eastern") + idx = MultiIndex.from_arrays([idx1, idx2]) + + expected1 = pd.DatetimeIndex( + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"], tz="Asia/Tokyo" + ) + + tm.assert_index_equal(idx.levels[0], expected1) + tm.assert_index_equal(idx.levels[1], idx2) + + # from datetime combos + # GH 7888 + date1 = np.datetime64("today") + date2 = datetime.today() + date3 = Timestamp.today() + + for d1, d2 in itertools.product([date1, date2, date3], [date1, date2, date3]): + index = MultiIndex.from_product([[d1], [d2]]) + assert isinstance(index.levels[0], pd.DatetimeIndex) + assert isinstance(index.levels[1], pd.DatetimeIndex) + + # but NOT date objects, matching Index behavior + date4 = date.today() + index = MultiIndex.from_product([[date4], [date2]]) + assert not isinstance(index.levels[0], pd.DatetimeIndex) + assert isinstance(index.levels[1], pd.DatetimeIndex) + + +def test_constructor_with_tz(): + index = pd.DatetimeIndex( + ["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific" + ) + columns = pd.DatetimeIndex( + ["2014/01/01 09:00", "2014/01/02 09:00"], name="dt2", tz="Asia/Tokyo" + ) + + result = MultiIndex.from_arrays([index, columns]) + + assert result.names == ["dt1", "dt2"] + tm.assert_index_equal(result.levels[0], index) + tm.assert_index_equal(result.levels[1], columns) + + result = MultiIndex.from_arrays([Series(index), Series(columns)]) + + assert result.names == ["dt1", "dt2"] + tm.assert_index_equal(result.levels[0], index) + tm.assert_index_equal(result.levels[1], columns) + + +def test_multiindex_inference_consistency(): + # check that inference behavior matches the base class + + v = date.today() + + arr = [v, v] + + idx = Index(arr) + assert idx.dtype == object + + mi = MultiIndex.from_arrays([arr]) + lev = mi.levels[0] + assert lev.dtype == object + + mi = MultiIndex.from_product([arr]) + lev = mi.levels[0] + assert lev.dtype == object + + mi = MultiIndex.from_tuples([(x,) for x in arr]) + lev = mi.levels[0] + assert lev.dtype == object + + +def test_dtype_representation(using_infer_string): + # GH#46900 + pmidx = MultiIndex.from_arrays([[1], ["a"]], names=[("a", "b"), ("c", "d")]) + result = pmidx.dtypes + exp = "object" if not using_infer_string else "string" + expected = Series( + ["int64", exp], + index=MultiIndex.from_tuples([("a", "b"), ("c", "d")]), + dtype=object, + ) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_conversion.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..3c2ca045d6f990837fae4d2b3d7bcbbf40e175e9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_conversion.py @@ -0,0 +1,164 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, +) +import pandas._testing as tm + + +def test_to_numpy(idx): + result = idx.to_numpy() + exp = idx.values 
+ tm.assert_numpy_array_equal(result, exp) + + +def test_to_frame(): + tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")] + + index = MultiIndex.from_tuples(tuples) + result = index.to_frame(index=False) + expected = DataFrame(tuples) + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")] + index = MultiIndex.from_tuples(tuples, names=["first", "second"]) + result = index.to_frame(index=False) + expected = DataFrame(tuples) + expected.columns = ["first", "second"] + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + # See GH-22580 + index = MultiIndex.from_tuples(tuples) + result = index.to_frame(index=False, name=["first", "second"]) + expected = DataFrame(tuples) + expected.columns = ["first", "second"] + tm.assert_frame_equal(result, expected) + + result = index.to_frame(name=["first", "second"]) + expected.index = index + expected.columns = ["first", "second"] + tm.assert_frame_equal(result, expected) + + msg = "'name' must be a list / sequence of column names." + with pytest.raises(TypeError, match=msg): + index.to_frame(name="first") + + msg = "'name' should have same length as number of levels on index." + with pytest.raises(ValueError, match=msg): + index.to_frame(name=["first"]) + + # Tests for datetime index + index = MultiIndex.from_product([range(5), pd.date_range("20130101", periods=3)]) + result = index.to_frame(index=False) + expected = DataFrame( + { + 0: np.repeat(np.arange(5, dtype="int64"), 3), + 1: np.tile(pd.date_range("20130101", periods=3), 5), + } + ) + tm.assert_frame_equal(result, expected) + + result = index.to_frame() + expected.index = index + tm.assert_frame_equal(result, expected) + + # See GH-22580 + result = index.to_frame(index=False, name=["first", "second"]) + expected = DataFrame( + { + "first": np.repeat(np.arange(5, dtype="int64"), 3), + "second": np.tile(pd.date_range("20130101", periods=3), 5), + } + ) + tm.assert_frame_equal(result, expected) + + result = index.to_frame(name=["first", "second"]) + expected.index = index + tm.assert_frame_equal(result, expected) + + +def test_to_frame_dtype_fidelity(): + # GH 22420 + mi = MultiIndex.from_arrays( + [ + pd.date_range("19910905", periods=6, tz="US/Eastern"), + [1, 1, 1, 2, 2, 2], + pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + ["x", "x", "y", "z", "x", "y"], + ], + names=["dates", "a", "b", "c"], + ) + original_dtypes = {name: mi.levels[i].dtype for i, name in enumerate(mi.names)} + + expected_df = DataFrame( + { + "dates": pd.date_range("19910905", periods=6, tz="US/Eastern"), + "a": [1, 1, 1, 2, 2, 2], + "b": pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True), + "c": ["x", "x", "y", "z", "x", "y"], + } + ) + df = mi.to_frame(index=False) + df_dtypes = df.dtypes.to_dict() + + tm.assert_frame_equal(df, expected_df) + assert original_dtypes == df_dtypes + + +def test_to_frame_resulting_column_order(): + # GH 22420 + expected = ["z", 0, "a"] + mi = MultiIndex.from_arrays( + [["a", "b", "c"], ["x", "y", "z"], ["q", "w", "e"]], names=expected + ) + result = mi.to_frame().columns.tolist() + assert result == expected + + +def test_to_frame_duplicate_labels(): + # GH 45245 + data = [(1, 2), (3, 4)] + names = ["a", "a"] + index = MultiIndex.from_tuples(data, names=names) + with pytest.raises(ValueError, match="Cannot create duplicate column 
labels"): + index.to_frame() + + result = index.to_frame(allow_duplicates=True) + expected = DataFrame(data, index=index, columns=names) + tm.assert_frame_equal(result, expected) + + names = [None, 0] + index = MultiIndex.from_tuples(data, names=names) + with pytest.raises(ValueError, match="Cannot create duplicate column labels"): + index.to_frame() + + result = index.to_frame(allow_duplicates=True) + expected = DataFrame(data, index=index, columns=[0, 0]) + tm.assert_frame_equal(result, expected) + + +def test_to_flat_index(idx): + expected = pd.Index( + ( + ("foo", "one"), + ("foo", "two"), + ("bar", "one"), + ("baz", "two"), + ("qux", "one"), + ("qux", "two"), + ), + tupleize_cols=False, + ) + result = idx.to_flat_index() + tm.assert_index_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_join.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_join.py new file mode 100644 index 0000000000000000000000000000000000000000..edd0feaaa1159ff8340af772d27f2a7af09ceb87 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_join.py @@ -0,0 +1,268 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Interval, + MultiIndex, + Series, + StringDtype, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "other", [Index(["three", "one", "two"]), Index(["one"]), Index(["one", "three"])] +) +def test_join_level(idx, other, join_type): + join_index, lidx, ridx = other.join( + idx, how=join_type, level="second", return_indexers=True + ) + + exp_level = other.join(idx.levels[1], how=join_type) + assert join_index.levels[0].equals(idx.levels[0]) + assert join_index.levels[1].equals(exp_level) + + # pare down levels + mask = np.array([x[1] in exp_level for x in idx], dtype=bool) + exp_values = idx.values[mask] + tm.assert_numpy_array_equal(join_index.values, exp_values) + + if join_type in ("outer", "inner"): + join_index2, ridx2, lidx2 = idx.join( + other, how=join_type, level="second", return_indexers=True + ) + + assert join_index.equals(join_index2) + tm.assert_numpy_array_equal(lidx, lidx2) + tm.assert_numpy_array_equal(ridx, ridx2) + tm.assert_numpy_array_equal(join_index2.values, exp_values) + + +def test_join_level_corner_case(idx): + # some corner cases + index = Index(["three", "one", "two"]) + result = index.join(idx, level="second") + assert isinstance(result, MultiIndex) + + with pytest.raises(TypeError, match="Join.*MultiIndex.*ambiguous"): + idx.join(idx, level=1) + + +def test_join_self(idx, join_type): + result = idx.join(idx, how=join_type) + expected = idx + if join_type == "outer": + expected = expected.sort_values() + tm.assert_index_equal(result, expected) + + +def test_join_multi(): + # GH 10665 + midx = MultiIndex.from_product([np.arange(4), np.arange(4)], names=["a", "b"]) + idx = Index([1, 2, 5], name="b") + + # inner + jidx, lidx, ridx = midx.join(idx, how="inner", return_indexers=True) + exp_idx = MultiIndex.from_product([np.arange(4), [1, 2]], names=["a", "b"]) + exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp) + exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp) + tm.assert_index_equal(jidx, exp_idx) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) + # flip + jidx, ridx, lidx = idx.join(midx, how="inner", return_indexers=True) + tm.assert_index_equal(jidx, exp_idx) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, 
exp_ridx) + + # keep MultiIndex + jidx, lidx, ridx = midx.join(idx, how="left", return_indexers=True) + exp_ridx = np.array( + [-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1], dtype=np.intp + ) + tm.assert_index_equal(jidx, midx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + # flip + jidx, ridx, lidx = idx.join(midx, how="right", return_indexers=True) + tm.assert_index_equal(jidx, midx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + + +def test_join_multi_wrong_order(): + # GH 25760 + # GH 28956 + + midx1 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) + midx2 = MultiIndex.from_product([[1, 2], [3, 4]], names=["b", "a"]) + + join_idx, lidx, ridx = midx1.join(midx2, return_indexers=True) + + exp_ridx = np.array([-1, -1, -1, -1], dtype=np.intp) + + tm.assert_index_equal(midx1, join_idx) + assert lidx is None + tm.assert_numpy_array_equal(ridx, exp_ridx) + + +def test_join_multi_return_indexers(): + # GH 34074 + + midx1 = MultiIndex.from_product([[1, 2], [3, 4], [5, 6]], names=["a", "b", "c"]) + midx2 = MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"]) + + result = midx1.join(midx2, return_indexers=False) + tm.assert_index_equal(result, midx1) + + +def test_join_overlapping_interval_level(): + # GH 44096 + idx_1 = MultiIndex.from_tuples( + [ + (1, Interval(0.0, 1.0)), + (1, Interval(1.0, 2.0)), + (1, Interval(2.0, 5.0)), + (2, Interval(0.0, 1.0)), + (2, Interval(1.0, 3.0)), # interval limit is here at 3.0, not at 2.0 + (2, Interval(3.0, 5.0)), + ], + names=["num", "interval"], + ) + + idx_2 = MultiIndex.from_tuples( + [ + (1, Interval(2.0, 5.0)), + (1, Interval(0.0, 1.0)), + (1, Interval(1.0, 2.0)), + (2, Interval(3.0, 5.0)), + (2, Interval(0.0, 1.0)), + (2, Interval(1.0, 3.0)), + ], + names=["num", "interval"], + ) + + expected = MultiIndex.from_tuples( + [ + (1, Interval(0.0, 1.0)), + (1, Interval(1.0, 2.0)), + (1, Interval(2.0, 5.0)), + (2, Interval(0.0, 1.0)), + (2, Interval(1.0, 3.0)), + (2, Interval(3.0, 5.0)), + ], + names=["num", "interval"], + ) + result = idx_1.join(idx_2, how="outer") + + tm.assert_index_equal(result, expected) + + +def test_join_midx_ea(): + # GH#49277 + midx = MultiIndex.from_arrays( + [Series([1, 1, 3], dtype="Int64"), Series([1, 2, 3], dtype="Int64")], + names=["a", "b"], + ) + midx2 = MultiIndex.from_arrays( + [Series([1], dtype="Int64"), Series([3], dtype="Int64")], names=["a", "c"] + ) + result = midx.join(midx2, how="inner") + expected = MultiIndex.from_arrays( + [ + Series([1, 1], dtype="Int64"), + Series([1, 2], dtype="Int64"), + Series([3, 3], dtype="Int64"), + ], + names=["a", "b", "c"], + ) + tm.assert_index_equal(result, expected) + + +def test_join_midx_string(): + # GH#49277 + midx = MultiIndex.from_arrays( + [ + Series(["a", "a", "c"], dtype=StringDtype()), + Series(["a", "b", "c"], dtype=StringDtype()), + ], + names=["a", "b"], + ) + midx2 = MultiIndex.from_arrays( + [Series(["a"], dtype=StringDtype()), Series(["c"], dtype=StringDtype())], + names=["a", "c"], + ) + result = midx.join(midx2, how="inner") + expected = MultiIndex.from_arrays( + [ + Series(["a", "a"], dtype=StringDtype()), + Series(["a", "b"], dtype=StringDtype()), + Series(["c", "c"], dtype=StringDtype()), + ], + names=["a", "b", "c"], + ) + tm.assert_index_equal(result, expected) + + +def test_join_multi_with_nan(): + # GH29252 + df1 = DataFrame( + data={"col1": [1.1, 1.2]}, + index=MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"]), + ) + df2 = DataFrame( + data={"col2": [2.1, 2.2]}, + 
index=MultiIndex.from_product([["A"], [np.nan, 2.0]], names=["id1", "id2"]), + ) + result = df1.join(df2) + expected = DataFrame( + data={"col1": [1.1, 1.2], "col2": [np.nan, 2.2]}, + index=MultiIndex.from_product([["A"], [1.0, 2.0]], names=["id1", "id2"]), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("val", [0, 5]) +def test_join_dtypes(any_numeric_ea_dtype, val): + # GH#49830 + midx = MultiIndex.from_arrays([Series([1, 2], dtype=any_numeric_ea_dtype), [3, 4]]) + midx2 = MultiIndex.from_arrays( + [Series([1, val, val], dtype=any_numeric_ea_dtype), [3, 4, 4]] + ) + result = midx.join(midx2, how="outer") + expected = MultiIndex.from_arrays( + [Series([val, val, 1, 2], dtype=any_numeric_ea_dtype), [4, 4, 3, 4]] + ).sort_values() + tm.assert_index_equal(result, expected) + + +def test_join_dtypes_all_nan(any_numeric_ea_dtype): + # GH#49830 + midx = MultiIndex.from_arrays( + [Series([1, 2], dtype=any_numeric_ea_dtype), [np.nan, np.nan]] + ) + midx2 = MultiIndex.from_arrays( + [Series([1, 0, 0], dtype=any_numeric_ea_dtype), [np.nan, np.nan, np.nan]] + ) + result = midx.join(midx2, how="outer") + expected = MultiIndex.from_arrays( + [ + Series([0, 0, 1, 2], dtype=any_numeric_ea_dtype), + [np.nan, np.nan, np.nan, np.nan], + ] + ) + tm.assert_index_equal(result, expected) + + +def test_join_index_levels(): + # GH#53093 + midx = midx = MultiIndex.from_tuples([("a", "2019-02-01"), ("a", "2019-02-01")]) + midx2 = MultiIndex.from_tuples([("a", "2019-01-31")]) + result = midx.join(midx2, how="outer") + expected = MultiIndex.from_tuples( + [("a", "2019-01-31"), ("a", "2019-02-01"), ("a", "2019-02-01")] + ) + tm.assert_index_equal(result.levels[1], expected.levels[1]) + tm.assert_index_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_names.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_names.py new file mode 100644 index 0000000000000000000000000000000000000000..45f19b4d70fb95cb2aee459a54d2ad53790b7df8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/indexes/multi/test_names.py @@ -0,0 +1,201 @@ +import pytest + +import pandas as pd +from pandas import MultiIndex +import pandas._testing as tm + + +def check_level_names(index, names): + assert [level.name for level in index.levels] == list(names) + + +def test_slice_keep_name(): + x = MultiIndex.from_tuples([("a", "b"), (1, 2), ("c", "d")], names=["x", "y"]) + assert x[1:].names == x.names + + +def test_index_name_retained(): + # GH9857 + result = pd.DataFrame({"x": [1, 2, 6], "y": [2, 2, 8], "z": [-5, 0, 5]}) + result = result.set_index("z") + result.loc[10] = [9, 10] + df_expected = pd.DataFrame( + {"x": [1, 2, 6, 9], "y": [2, 2, 8, 10], "z": [-5, 0, 5, 10]} + ) + df_expected = df_expected.set_index("z") + tm.assert_frame_equal(result, df_expected) + + +def test_changing_names(idx): + assert [level.name for level in idx.levels] == ["first", "second"] + + view = idx.view() + copy = idx.copy() + shallow_copy = idx._view() + + # changing names should not change level names on object + new_names = [name + "a" for name in idx.names] + idx.names = new_names + check_level_names(idx, ["firsta", "seconda"]) + + # and not on copies + check_level_names(view, ["first", "second"]) + check_level_names(copy, ["first", "second"]) + check_level_names(shallow_copy, ["first", "second"]) + + # and copies shouldn't change original + shallow_copy.names = [name + "c" for name in shallow_copy.names] + check_level_names(idx, 
["firsta", "seconda"]) + + +def test_take_preserve_name(idx): + taken = idx.take([3, 0, 1]) + assert taken.names == idx.names + + +def test_copy_names(): + # Check that adding a "names" parameter to the copy is honored + # GH14302 + multi_idx = MultiIndex.from_tuples([(1, 2), (3, 4)], names=["MyName1", "MyName2"]) + multi_idx1 = multi_idx.copy() + + assert multi_idx.equals(multi_idx1) + assert multi_idx.names == ["MyName1", "MyName2"] + assert multi_idx1.names == ["MyName1", "MyName2"] + + multi_idx2 = multi_idx.copy(names=["NewName1", "NewName2"]) + + assert multi_idx.equals(multi_idx2) + assert multi_idx.names == ["MyName1", "MyName2"] + assert multi_idx2.names == ["NewName1", "NewName2"] + + multi_idx3 = multi_idx.copy(name=["NewName1", "NewName2"]) + + assert multi_idx.equals(multi_idx3) + assert multi_idx.names == ["MyName1", "MyName2"] + assert multi_idx3.names == ["NewName1", "NewName2"] + + # gh-35592 + with pytest.raises(ValueError, match="Length of new names must be 2, got 1"): + multi_idx.copy(names=["mario"]) + + with pytest.raises(TypeError, match="MultiIndex.name must be a hashable type"): + multi_idx.copy(names=[["mario"], ["luigi"]]) + + +def test_names(idx): + # names are assigned in setup + assert idx.names == ["first", "second"] + level_names = [level.name for level in idx.levels] + assert level_names == idx.names + + # setting bad names on existing + index = idx + with pytest.raises(ValueError, match="^Length of names"): + setattr(index, "names", list(index.names) + ["third"]) + with pytest.raises(ValueError, match="^Length of names"): + setattr(index, "names", []) + + # initializing with bad names (should always be equivalent) + major_axis, minor_axis = idx.levels + major_codes, minor_codes = idx.codes + with pytest.raises(ValueError, match="^Length of names"): + MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=["first"], + ) + with pytest.raises(ValueError, match="^Length of names"): + MultiIndex( + levels=[major_axis, minor_axis], + codes=[major_codes, minor_codes], + names=["first", "second", "third"], + ) + + # names are assigned on index, but not transferred to the levels + index.names = ["a", "b"] + level_names = [level.name for level in index.levels] + assert level_names == ["a", "b"] + + +def test_duplicate_level_names_access_raises(idx): + # GH19029 + idx.names = ["foo", "foo"] + with pytest.raises(ValueError, match="name foo occurs multiple times"): + idx._get_level_number("foo") + + +def test_get_names_from_levels(): + idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) + + assert idx.levels[0].name == "a" + assert idx.levels[1].name == "b" + + +def test_setting_names_from_levels_raises(): + idx = MultiIndex.from_product([["a"], [1, 2]], names=["a", "b"]) + with pytest.raises(RuntimeError, match="set_names"): + idx.levels[0].name = "foo" + + with pytest.raises(RuntimeError, match="set_names"): + idx.levels[1].name = "foo" + + new = pd.Series(1, index=idx.levels[0]) + with pytest.raises(RuntimeError, match="set_names"): + new.index.name = "bar" + + assert pd.Index._no_setting_name is False + assert pd.RangeIndex._no_setting_name is False + + +@pytest.mark.parametrize("func", ["rename", "set_names"]) +@pytest.mark.parametrize( + "rename_dict, exp_names", + [ + ({"x": "z"}, ["z", "y", "z"]), + ({"x": "z", "y": "x"}, ["z", "x", "z"]), + ({"y": "z"}, ["x", "z", "x"]), + ({}, ["x", "y", "x"]), + ({"z": "a"}, ["x", "y", "x"]), + ({"y": "z", "a": "b"}, ["x", "z", "x"]), + ], +) +def 
test_name_mi_with_dict_like_duplicate_names(func, rename_dict, exp_names): + # GH#20421 + mi = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=["x", "y", "x"]) + result = getattr(mi, func)(rename_dict) + expected = MultiIndex.from_arrays([[1, 2], [3, 4], [5, 6]], names=exp_names) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("func", ["rename", "set_names"]) +@pytest.mark.parametrize( + "rename_dict, exp_names", + [ + ({"x": "z"}, ["z", "y"]), + ({"x": "z", "y": "x"}, ["z", "x"]), + ({"a": "z"}, ["x", "y"]), + ({}, ["x", "y"]), + ], +) +def test_name_mi_with_dict_like(func, rename_dict, exp_names): + # GH#20421 + mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"]) + result = getattr(mi, func)(rename_dict) + expected = MultiIndex.from_arrays([[1, 2], [3, 4]], names=exp_names) + tm.assert_index_equal(result, expected) + + +def test_index_name_with_dict_like_raising(): + # GH#20421 + ix = pd.Index([1, 2]) + msg = "Can only pass dict-like as `names` for MultiIndex." + with pytest.raises(TypeError, match=msg): + ix.set_names({"x": "z"}) + + +def test_multiindex_name_and_level_raising(): + # GH#20421 + mi = MultiIndex.from_arrays([[1, 2], [3, 4]], names=["x", "y"]) + with pytest.raises(TypeError, match="Can not pass level for dictlike `names`."): + mi.set_names(names={"x": "z"}, level={"x": "z"}) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py new file mode 100644 index 0000000000000000000000000000000000000000..8dfca117a658b2a163ef35699c903ad14a032062 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py @@ -0,0 +1,73 @@ +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) + + +class TestContains: + def test_contains(self): + interval = Interval(0, 1) + assert 0.5 in interval + assert 1 in interval + assert 0 not in interval + + interval_both = Interval(0, 1, "both") + assert 0 in interval_both + assert 1 in interval_both + + interval_neither = Interval(0, 1, closed="neither") + assert 0 not in interval_neither + assert 0.5 in interval_neither + assert 1 not in interval_neither + + def test_contains_interval(self, inclusive_endpoints_fixture): + interval1 = Interval(0, 1, "both") + interval2 = Interval(0, 1, inclusive_endpoints_fixture) + assert interval1 in interval1 + assert interval2 in interval2 + assert interval2 in interval1 + assert interval1 not in interval2 or inclusive_endpoints_fixture == "both" + + def test_contains_infinite_length(self): + interval1 = Interval(0, 1, "both") + interval2 = Interval(float("-inf"), float("inf"), "neither") + assert interval1 in interval2 + assert interval2 not in interval1 + + def test_contains_zero_length(self): + interval1 = Interval(0, 1, "both") + interval2 = Interval(-1, -1, "both") + interval3 = Interval(0.5, 0.5, "both") + assert interval2 not in interval1 + assert interval3 in interval1 + assert interval2 not in interval3 and interval3 not in interval2 + assert interval1 not in interval2 and interval1 not in interval3 + + @pytest.mark.parametrize( + "type1", + [ + (0, 1), + (Timestamp(2000, 1, 1, 0), 
Timestamp(2000, 1, 1, 1)), + (Timedelta("0h"), Timedelta("1h")), + ], + ) + @pytest.mark.parametrize( + "type2", + [ + (0, 1), + (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)), + (Timedelta("0h"), Timedelta("1h")), + ], + ) + def test_contains_mixed_types(self, type1, type2): + interval1 = Interval(*type1) + interval2 = Interval(*type2) + if type1 == type2: + assert interval1 in interval2 + else: + msg = "^'<=' not supported between instances of" + with pytest.raises(TypeError, match=msg): + interval1 in interval2 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..91b31e82f9c524f87e2849360cfd44b2f77b0c9c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py @@ -0,0 +1,87 @@ +import numpy as np +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) + + +@pytest.fixture +def interval(): + return Interval(0, 1) + + +class TestInterval: + def test_properties(self, interval): + assert interval.closed == "right" + assert interval.left == 0 + assert interval.right == 1 + assert interval.mid == 0.5 + + def test_hash(self, interval): + # should not raise + hash(interval) + + @pytest.mark.parametrize( + "left, right, expected", + [ + (0, 5, 5), + (-2, 5.5, 7.5), + (10, 10, 0), + (10, np.inf, np.inf), + (-np.inf, -5, np.inf), + (-np.inf, np.inf, np.inf), + (Timedelta("0 days"), Timedelta("5 days"), Timedelta("5 days")), + (Timedelta("10 days"), Timedelta("10 days"), Timedelta("0 days")), + (Timedelta("1h10min"), Timedelta("5h5min"), Timedelta("3h55min")), + (Timedelta("5s"), Timedelta("1h"), Timedelta("59min55s")), + ], + ) + def test_length(self, left, right, expected): + # GH 18789 + iv = Interval(left, right) + result = iv.length + assert result == expected + + @pytest.mark.parametrize( + "left, right, expected", + [ + ("2017-01-01", "2017-01-06", "5 days"), + ("2017-01-01", "2017-01-01 12:00:00", "12 hours"), + ("2017-01-01 12:00", "2017-01-01 12:00:00", "0 days"), + ("2017-01-01 12:01", "2017-01-05 17:31:00", "4 days 5 hours 30 min"), + ], + ) + @pytest.mark.parametrize("tz", (None, "UTC", "CET", "US/Eastern")) + def test_length_timestamp(self, tz, left, right, expected): + # GH 18789 + iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz)) + result = iv.length + expected = Timedelta(expected) + assert result == expected + + @pytest.mark.parametrize( + "left, right", + [ + (0, 1), + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timestamp("2018-01-02")), + ( + Timestamp("2018-01-01", tz="US/Eastern"), + Timestamp("2018-01-02", tz="US/Eastern"), + ), + ], + ) + def test_is_empty(self, left, right, closed): + # GH27219 + # non-empty always return False + iv = Interval(left, right, closed) + assert iv.is_empty is False + + # same endpoint is empty except when closed='both' (contains one point) + iv = Interval(left, left, closed) + result = iv.is_empty + expected = closed != "both" + assert result is expected diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py new file mode 100644 index 0000000000000000000000000000000000000000..7fcf59d7bb4afc0077884de68dc335aff25c2cc5 --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py @@ -0,0 +1,67 @@ +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) + + +@pytest.fixture( + params=[ + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timedelta("1 day")), + (0, 1), + ], + ids=lambda x: type(x[0]).__name__, +) +def start_shift(request): + """ + Fixture for generating intervals of types from a start value and a shift + value that can be added to start to generate an endpoint + """ + return request.param + + +class TestOverlaps: + def test_overlaps_self(self, start_shift, closed): + start, shift = start_shift + interval = Interval(start, start + shift, closed) + assert interval.overlaps(interval) + + def test_overlaps_nested(self, start_shift, closed, other_closed): + start, shift = start_shift + interval1 = Interval(start, start + 3 * shift, other_closed) + interval2 = Interval(start + shift, start + 2 * shift, closed) + + # nested intervals should always overlap + assert interval1.overlaps(interval2) + + def test_overlaps_disjoint(self, start_shift, closed, other_closed): + start, shift = start_shift + interval1 = Interval(start, start + shift, other_closed) + interval2 = Interval(start + 2 * shift, start + 3 * shift, closed) + + # disjoint intervals should never overlap + assert not interval1.overlaps(interval2) + + def test_overlaps_endpoint(self, start_shift, closed, other_closed): + start, shift = start_shift + interval1 = Interval(start, start + shift, other_closed) + interval2 = Interval(start + shift, start + 2 * shift, closed) + + # overlap if shared endpoint is closed for both (overlap at a point) + result = interval1.overlaps(interval2) + expected = interval1.closed_right and interval2.closed_left + assert result == expected + + @pytest.mark.parametrize( + "other", + [10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")], + ids=lambda x: type(x).__name__, + ) + def test_overlaps_invalid_type(self, other): + interval = Interval(0, 1) + msg = f"`other` must be an Interval, got {type(other).__name__}" + with pytest.raises(TypeError, match=msg): + interval.overlaps(other) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a360b176ea08a23d88c9465f640e4fd7a5e1f9de Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_arithmetic.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_arithmetic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..231605520df22a7c0d1ee7b2bafa1caee7c7c22e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_arithmetic.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_asfreq.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_asfreq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e02b5d244174b864ada406786768ff805cee90e Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_asfreq.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_period.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_period.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bcf648514070ba28ccde6c6cfe18269856e529a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/__pycache__/test_period.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/test_asfreq.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/test_asfreq.py new file mode 100644 index 0000000000000000000000000000000000000000..73c4d8061c25789c1ec2a5e4d2c2851d4066a90e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/test_asfreq.py @@ -0,0 +1,828 @@ +import pytest + +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG +from pandas.errors import OutOfBoundsDatetime + +from pandas import ( + Period, + Timestamp, + offsets, +) +import pandas._testing as tm + +bday_msg = "Period with BDay freq is deprecated" + + +class TestFreqConversion: + """Test frequency conversion of date objects""" + + @pytest.mark.filterwarnings("ignore:Period with BDay:FutureWarning") + @pytest.mark.parametrize("freq", ["Y", "Q", "M", "W", "B", "D"]) + def test_asfreq_near_zero(self, freq): + # GH#19643, GH#19650 + per = Period("0001-01-01", freq=freq) + tup1 = (per.year, per.hour, per.day) + + prev = per - 1 + assert prev.ordinal == per.ordinal - 1 + tup2 = (prev.year, prev.month, prev.day) + assert tup2 < tup1 + + def test_asfreq_near_zero_weekly(self): + # GH#19834 + per1 = Period("0001-01-01", "D") + 6 + per2 = Period("0001-01-01", "D") - 6 + week1 = per1.asfreq("W") + week2 = per2.asfreq("W") + assert week1 != week2 + assert week1.asfreq("D", "E") >= per1 + assert week2.asfreq("D", "S") <= per2 + + def test_to_timestamp_out_of_bounds(self): + # GH#19643, used to incorrectly give Timestamp in 1754 + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + per = Period("0001-01-01", freq="B") + msg = "Out of bounds nanosecond timestamp" + with pytest.raises(OutOfBoundsDatetime, match=msg): + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + per.to_timestamp() + + def test_asfreq_corner(self): + val = Period(freq="Y", year=2007) + result1 = val.asfreq("5min") + result2 = val.asfreq("min") + expected = Period("2007-12-31 23:59", freq="min") + assert result1.ordinal == expected.ordinal + assert result1.freqstr == "5min" + assert result2.ordinal == expected.ordinal + assert result2.freqstr == "min" + + def test_conv_annual(self): + # frequency conversion tests: from Annual Frequency + + ival_A = Period(freq="Y", year=2007) + + ival_AJAN = Period(freq="Y-JAN", year=2007) + ival_AJUN = Period(freq="Y-JUN", year=2007) + ival_ANOV = Period(freq="Y-NOV", year=2007) + + ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1) + ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4) + ival_A_to_M_start = Period(freq="M", year=2007, month=1) + ival_A_to_M_end = Period(freq="M", year=2007, month=12) + ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1) + ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_A_to_B_start = Period(freq="B", year=2007, 
month=1, day=1) + ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31) + ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1) + ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31) + ival_A_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_A_to_H_end = Period(freq="h", year=2007, month=12, day=31, hour=23) + ival_A_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_A_to_T_end = Period( + freq="Min", year=2007, month=12, day=31, hour=23, minute=59 + ) + ival_A_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_A_to_S_end = Period( + freq="s", year=2007, month=12, day=31, hour=23, minute=59, second=59 + ) + + ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31) + ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1) + ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30) + ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1) + ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30) + ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1) + + assert ival_A.asfreq("Q", "s") == ival_A_to_Q_start + assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end + assert ival_A.asfreq("M", "s") == ival_A_to_M_start + assert ival_A.asfreq("M", "E") == ival_A_to_M_end + assert ival_A.asfreq("W", "s") == ival_A_to_W_start + assert ival_A.asfreq("W", "E") == ival_A_to_W_end + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_A.asfreq("B", "s") == ival_A_to_B_start + assert ival_A.asfreq("B", "E") == ival_A_to_B_end + assert ival_A.asfreq("D", "s") == ival_A_to_D_start + assert ival_A.asfreq("D", "E") == ival_A_to_D_end + msg = "'H' is deprecated and will be removed in a future version." + with tm.assert_produces_warning(FutureWarning, match=msg): + assert ival_A.asfreq("H", "s") == ival_A_to_H_start + assert ival_A.asfreq("H", "E") == ival_A_to_H_end + assert ival_A.asfreq("min", "s") == ival_A_to_T_start + assert ival_A.asfreq("min", "E") == ival_A_to_T_end + msg = "'T' is deprecated and will be removed in a future version." + with tm.assert_produces_warning(FutureWarning, match=msg): + assert ival_A.asfreq("T", "s") == ival_A_to_T_start + assert ival_A.asfreq("T", "E") == ival_A_to_T_end + msg = "'S' is deprecated and will be removed in a future version." 
+ with tm.assert_produces_warning(FutureWarning, match=msg): + assert ival_A.asfreq("S", "S") == ival_A_to_S_start + assert ival_A.asfreq("S", "E") == ival_A_to_S_end + + assert ival_AJAN.asfreq("D", "s") == ival_AJAN_to_D_start + assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end + + assert ival_AJUN.asfreq("D", "s") == ival_AJUN_to_D_start + assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end + + assert ival_ANOV.asfreq("D", "s") == ival_ANOV_to_D_start + assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end + + assert ival_A.asfreq("Y") == ival_A + + def test_conv_quarterly(self): + # frequency conversion tests: from Quarterly Frequency + + ival_Q = Period(freq="Q", year=2007, quarter=1) + ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4) + + ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1) + ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1) + + ival_Q_to_A = Period(freq="Y", year=2007) + ival_Q_to_M_start = Period(freq="M", year=2007, month=1) + ival_Q_to_M_end = Period(freq="M", year=2007, month=3) + ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1) + ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1) + ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30) + ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1) + ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31) + ival_Q_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_Q_to_H_end = Period(freq="h", year=2007, month=3, day=31, hour=23) + ival_Q_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_Q_to_T_end = Period( + freq="Min", year=2007, month=3, day=31, hour=23, minute=59 + ) + ival_Q_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_Q_to_S_end = Period( + freq="s", year=2007, month=3, day=31, hour=23, minute=59, second=59 + ) + + ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1) + ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30) + + ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1) + ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30) + + assert ival_Q.asfreq("Y") == ival_Q_to_A + assert ival_Q_end_of_year.asfreq("Y") == ival_Q_to_A + + assert ival_Q.asfreq("M", "s") == ival_Q_to_M_start + assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end + assert ival_Q.asfreq("W", "s") == ival_Q_to_W_start + assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_Q.asfreq("B", "s") == ival_Q_to_B_start + assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end + assert ival_Q.asfreq("D", "s") == ival_Q_to_D_start + assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end + assert ival_Q.asfreq("h", "s") == ival_Q_to_H_start + assert ival_Q.asfreq("h", "E") == ival_Q_to_H_end + assert ival_Q.asfreq("Min", "s") == ival_Q_to_T_start + assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end + assert ival_Q.asfreq("s", "s") == ival_Q_to_S_start + assert ival_Q.asfreq("s", "E") == ival_Q_to_S_end + + assert ival_QEJAN.asfreq("D", "s") == ival_QEJAN_to_D_start + assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end + assert ival_QEJUN.asfreq("D", "s") == ival_QEJUN_to_D_start + assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end + + assert ival_Q.asfreq("Q") == ival_Q + + def 
test_conv_monthly(self): + # frequency conversion tests: from Monthly Frequency + + ival_M = Period(freq="M", year=2007, month=1) + ival_M_end_of_year = Period(freq="M", year=2007, month=12) + ival_M_end_of_quarter = Period(freq="M", year=2007, month=3) + ival_M_to_A = Period(freq="Y", year=2007) + ival_M_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1) + ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1) + ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31) + ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1) + ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31) + ival_M_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_M_to_H_end = Period(freq="h", year=2007, month=1, day=31, hour=23) + ival_M_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_M_to_T_end = Period( + freq="Min", year=2007, month=1, day=31, hour=23, minute=59 + ) + ival_M_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_M_to_S_end = Period( + freq="s", year=2007, month=1, day=31, hour=23, minute=59, second=59 + ) + + assert ival_M.asfreq("Y") == ival_M_to_A + assert ival_M_end_of_year.asfreq("Y") == ival_M_to_A + assert ival_M.asfreq("Q") == ival_M_to_Q + assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q + + assert ival_M.asfreq("W", "s") == ival_M_to_W_start + assert ival_M.asfreq("W", "E") == ival_M_to_W_end + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_M.asfreq("B", "s") == ival_M_to_B_start + assert ival_M.asfreq("B", "E") == ival_M_to_B_end + assert ival_M.asfreq("D", "s") == ival_M_to_D_start + assert ival_M.asfreq("D", "E") == ival_M_to_D_end + assert ival_M.asfreq("h", "s") == ival_M_to_H_start + assert ival_M.asfreq("h", "E") == ival_M_to_H_end + assert ival_M.asfreq("Min", "s") == ival_M_to_T_start + assert ival_M.asfreq("Min", "E") == ival_M_to_T_end + assert ival_M.asfreq("s", "s") == ival_M_to_S_start + assert ival_M.asfreq("s", "E") == ival_M_to_S_end + + assert ival_M.asfreq("M") == ival_M + + def test_conv_weekly(self): + # frequency conversion tests: from Weekly Frequency + ival_W = Period(freq="W", year=2007, month=1, day=1) + + ival_WSUN = Period(freq="W", year=2007, month=1, day=7) + ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6) + ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5) + ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4) + ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3) + ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2) + ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1) + + ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1) + ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7) + ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31) + ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6) + ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30) + ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5) + ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29) + ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4) + ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28) + ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, 
day=3) + ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27) + ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2) + ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26) + ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1) + + ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31) + ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31) + ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31) + ival_W_to_A = Period(freq="Y", year=2007) + ival_W_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_W_to_M = Period(freq="M", year=2007, month=1) + + if Period(freq="D", year=2007, month=12, day=31).weekday == 6: + ival_W_to_A_end_of_year = Period(freq="Y", year=2007) + else: + ival_W_to_A_end_of_year = Period(freq="Y", year=2008) + + if Period(freq="D", year=2007, month=3, day=31).weekday == 6: + ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1) + else: + ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2) + + if Period(freq="D", year=2007, month=1, day=31).weekday == 6: + ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1) + else: + ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2) + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1) + ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5) + ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1) + ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7) + ival_W_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_W_to_H_end = Period(freq="h", year=2007, month=1, day=7, hour=23) + ival_W_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_W_to_T_end = Period( + freq="Min", year=2007, month=1, day=7, hour=23, minute=59 + ) + ival_W_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_W_to_S_end = Period( + freq="s", year=2007, month=1, day=7, hour=23, minute=59, second=59 + ) + + assert ival_W.asfreq("Y") == ival_W_to_A + assert ival_W_end_of_year.asfreq("Y") == ival_W_to_A_end_of_year + + assert ival_W.asfreq("Q") == ival_W_to_Q + assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter + + assert ival_W.asfreq("M") == ival_W_to_M + assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_W.asfreq("B", "s") == ival_W_to_B_start + assert ival_W.asfreq("B", "E") == ival_W_to_B_end + + assert ival_W.asfreq("D", "s") == ival_W_to_D_start + assert ival_W.asfreq("D", "E") == ival_W_to_D_end + + assert ival_WSUN.asfreq("D", "s") == ival_WSUN_to_D_start + assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end + assert ival_WSAT.asfreq("D", "s") == ival_WSAT_to_D_start + assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end + assert ival_WFRI.asfreq("D", "s") == ival_WFRI_to_D_start + assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end + assert ival_WTHU.asfreq("D", "s") == ival_WTHU_to_D_start + assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end + assert ival_WWED.asfreq("D", "s") == ival_WWED_to_D_start + assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end + assert ival_WTUE.asfreq("D", "s") == ival_WTUE_to_D_start + assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end + assert ival_WMON.asfreq("D", "s") == ival_WMON_to_D_start + assert ival_WMON.asfreq("D", "E") == 
ival_WMON_to_D_end + + assert ival_W.asfreq("h", "s") == ival_W_to_H_start + assert ival_W.asfreq("h", "E") == ival_W_to_H_end + assert ival_W.asfreq("Min", "s") == ival_W_to_T_start + assert ival_W.asfreq("Min", "E") == ival_W_to_T_end + assert ival_W.asfreq("s", "s") == ival_W_to_S_start + assert ival_W.asfreq("s", "E") == ival_W_to_S_end + + assert ival_W.asfreq("W") == ival_W + + msg = INVALID_FREQ_ERR_MSG + with pytest.raises(ValueError, match=msg): + ival_W.asfreq("WK") + + def test_conv_weekly_legacy(self): + # frequency conversion tests: from Weekly Frequency + msg = INVALID_FREQ_ERR_MSG + with pytest.raises(ValueError, match=msg): + Period(freq="WK", year=2007, month=1, day=1) + + with pytest.raises(ValueError, match=msg): + Period(freq="WK-SAT", year=2007, month=1, day=6) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-FRI", year=2007, month=1, day=5) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-THU", year=2007, month=1, day=4) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-WED", year=2007, month=1, day=3) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-TUE", year=2007, month=1, day=2) + with pytest.raises(ValueError, match=msg): + Period(freq="WK-MON", year=2007, month=1, day=1) + + def test_conv_business(self): + # frequency conversion tests: from Business Frequency" + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_B = Period(freq="B", year=2007, month=1, day=1) + ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31) + ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30) + ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31) + ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5) + + ival_B_to_A = Period(freq="Y", year=2007) + ival_B_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_B_to_M = Period(freq="M", year=2007, month=1) + ival_B_to_W = Period(freq="W", year=2007, month=1, day=7) + ival_B_to_D = Period(freq="D", year=2007, month=1, day=1) + ival_B_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_B_to_H_end = Period(freq="h", year=2007, month=1, day=1, hour=23) + ival_B_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_B_to_T_end = Period( + freq="Min", year=2007, month=1, day=1, hour=23, minute=59 + ) + ival_B_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_B_to_S_end = Period( + freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59 + ) + + assert ival_B.asfreq("Y") == ival_B_to_A + assert ival_B_end_of_year.asfreq("Y") == ival_B_to_A + assert ival_B.asfreq("Q") == ival_B_to_Q + assert ival_B_end_of_quarter.asfreq("Q") == ival_B_to_Q + assert ival_B.asfreq("M") == ival_B_to_M + assert ival_B_end_of_month.asfreq("M") == ival_B_to_M + assert ival_B.asfreq("W") == ival_B_to_W + assert ival_B_end_of_week.asfreq("W") == ival_B_to_W + + assert ival_B.asfreq("D") == ival_B_to_D + + assert ival_B.asfreq("h", "s") == ival_B_to_H_start + assert ival_B.asfreq("h", "E") == ival_B_to_H_end + assert ival_B.asfreq("Min", "s") == ival_B_to_T_start + assert ival_B.asfreq("Min", "E") == ival_B_to_T_end + assert ival_B.asfreq("s", "s") == ival_B_to_S_start + assert ival_B.asfreq("s", "E") == ival_B_to_S_end + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_B.asfreq("B") == ival_B + + def test_conv_daily(self): + # frequency conversion tests: from Business Frequency" + + ival_D = 
Period(freq="D", year=2007, month=1, day=1) + ival_D_end_of_year = Period(freq="D", year=2007, month=12, day=31) + ival_D_end_of_quarter = Period(freq="D", year=2007, month=3, day=31) + ival_D_end_of_month = Period(freq="D", year=2007, month=1, day=31) + ival_D_end_of_week = Period(freq="D", year=2007, month=1, day=7) + + ival_D_friday = Period(freq="D", year=2007, month=1, day=5) + ival_D_saturday = Period(freq="D", year=2007, month=1, day=6) + ival_D_sunday = Period(freq="D", year=2007, month=1, day=7) + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_B_friday = Period(freq="B", year=2007, month=1, day=5) + ival_B_monday = Period(freq="B", year=2007, month=1, day=8) + + ival_D_to_A = Period(freq="Y", year=2007) + + ival_Deoq_to_AJAN = Period(freq="Y-JAN", year=2008) + ival_Deoq_to_AJUN = Period(freq="Y-JUN", year=2007) + ival_Deoq_to_ADEC = Period(freq="Y-DEC", year=2007) + + ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4) + ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3) + ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1) + + ival_D_to_M = Period(freq="M", year=2007, month=1) + ival_D_to_W = Period(freq="W", year=2007, month=1, day=7) + + ival_D_to_H_start = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_D_to_H_end = Period(freq="h", year=2007, month=1, day=1, hour=23) + ival_D_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_D_to_T_end = Period( + freq="Min", year=2007, month=1, day=1, hour=23, minute=59 + ) + ival_D_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_D_to_S_end = Period( + freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59 + ) + + assert ival_D.asfreq("Y") == ival_D_to_A + + assert ival_D_end_of_quarter.asfreq("Y-JAN") == ival_Deoq_to_AJAN + assert ival_D_end_of_quarter.asfreq("Y-JUN") == ival_Deoq_to_AJUN + assert ival_D_end_of_quarter.asfreq("Y-DEC") == ival_Deoq_to_ADEC + + assert ival_D_end_of_year.asfreq("Y") == ival_D_to_A + assert ival_D_end_of_quarter.asfreq("Q") == ival_D_to_QEDEC + assert ival_D.asfreq("Q-JAN") == ival_D_to_QEJAN + assert ival_D.asfreq("Q-JUN") == ival_D_to_QEJUN + assert ival_D.asfreq("Q-DEC") == ival_D_to_QEDEC + assert ival_D.asfreq("M") == ival_D_to_M + assert ival_D_end_of_month.asfreq("M") == ival_D_to_M + assert ival_D.asfreq("W") == ival_D_to_W + assert ival_D_end_of_week.asfreq("W") == ival_D_to_W + + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_D_friday.asfreq("B") == ival_B_friday + assert ival_D_saturday.asfreq("B", "s") == ival_B_friday + assert ival_D_saturday.asfreq("B", "E") == ival_B_monday + assert ival_D_sunday.asfreq("B", "s") == ival_B_friday + assert ival_D_sunday.asfreq("B", "E") == ival_B_monday + + assert ival_D.asfreq("h", "s") == ival_D_to_H_start + assert ival_D.asfreq("h", "E") == ival_D_to_H_end + assert ival_D.asfreq("Min", "s") == ival_D_to_T_start + assert ival_D.asfreq("Min", "E") == ival_D_to_T_end + assert ival_D.asfreq("s", "s") == ival_D_to_S_start + assert ival_D.asfreq("s", "E") == ival_D_to_S_end + + assert ival_D.asfreq("D") == ival_D + + def test_conv_hourly(self): + # frequency conversion tests: from Hourly Frequency" + + ival_H = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_H_end_of_year = Period(freq="h", year=2007, month=12, day=31, hour=23) + ival_H_end_of_quarter = Period(freq="h", year=2007, month=3, day=31, hour=23) + ival_H_end_of_month = Period(freq="h", year=2007, 
month=1, day=31, hour=23) + ival_H_end_of_week = Period(freq="h", year=2007, month=1, day=7, hour=23) + ival_H_end_of_day = Period(freq="h", year=2007, month=1, day=1, hour=23) + ival_H_end_of_bus = Period(freq="h", year=2007, month=1, day=1, hour=23) + + ival_H_to_A = Period(freq="Y", year=2007) + ival_H_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_H_to_M = Period(freq="M", year=2007, month=1) + ival_H_to_W = Period(freq="W", year=2007, month=1, day=7) + ival_H_to_D = Period(freq="D", year=2007, month=1, day=1) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_H_to_B = Period(freq="B", year=2007, month=1, day=1) + + ival_H_to_T_start = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=0 + ) + ival_H_to_T_end = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=59 + ) + ival_H_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_H_to_S_end = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=59, second=59 + ) + + assert ival_H.asfreq("Y") == ival_H_to_A + assert ival_H_end_of_year.asfreq("Y") == ival_H_to_A + assert ival_H.asfreq("Q") == ival_H_to_Q + assert ival_H_end_of_quarter.asfreq("Q") == ival_H_to_Q + assert ival_H.asfreq("M") == ival_H_to_M + assert ival_H_end_of_month.asfreq("M") == ival_H_to_M + assert ival_H.asfreq("W") == ival_H_to_W + assert ival_H_end_of_week.asfreq("W") == ival_H_to_W + assert ival_H.asfreq("D") == ival_H_to_D + assert ival_H_end_of_day.asfreq("D") == ival_H_to_D + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_H.asfreq("B") == ival_H_to_B + assert ival_H_end_of_bus.asfreq("B") == ival_H_to_B + + assert ival_H.asfreq("Min", "s") == ival_H_to_T_start + assert ival_H.asfreq("Min", "E") == ival_H_to_T_end + assert ival_H.asfreq("s", "s") == ival_H_to_S_start + assert ival_H.asfreq("s", "E") == ival_H_to_S_end + + assert ival_H.asfreq("h") == ival_H + + def test_conv_minutely(self): + # frequency conversion tests: from Minutely Frequency" + + ival_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0) + ival_T_end_of_year = Period( + freq="Min", year=2007, month=12, day=31, hour=23, minute=59 + ) + ival_T_end_of_quarter = Period( + freq="Min", year=2007, month=3, day=31, hour=23, minute=59 + ) + ival_T_end_of_month = Period( + freq="Min", year=2007, month=1, day=31, hour=23, minute=59 + ) + ival_T_end_of_week = Period( + freq="Min", year=2007, month=1, day=7, hour=23, minute=59 + ) + ival_T_end_of_day = Period( + freq="Min", year=2007, month=1, day=1, hour=23, minute=59 + ) + ival_T_end_of_bus = Period( + freq="Min", year=2007, month=1, day=1, hour=23, minute=59 + ) + ival_T_end_of_hour = Period( + freq="Min", year=2007, month=1, day=1, hour=0, minute=59 + ) + + ival_T_to_A = Period(freq="Y", year=2007) + ival_T_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_T_to_M = Period(freq="M", year=2007, month=1) + ival_T_to_W = Period(freq="W", year=2007, month=1, day=7) + ival_T_to_D = Period(freq="D", year=2007, month=1, day=1) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_T_to_B = Period(freq="B", year=2007, month=1, day=1) + ival_T_to_H = Period(freq="h", year=2007, month=1, day=1, hour=0) + + ival_T_to_S_start = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0 + ) + ival_T_to_S_end = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=59 + ) + + assert ival_T.asfreq("Y") == ival_T_to_A + assert ival_T_end_of_year.asfreq("Y") == 
ival_T_to_A + assert ival_T.asfreq("Q") == ival_T_to_Q + assert ival_T_end_of_quarter.asfreq("Q") == ival_T_to_Q + assert ival_T.asfreq("M") == ival_T_to_M + assert ival_T_end_of_month.asfreq("M") == ival_T_to_M + assert ival_T.asfreq("W") == ival_T_to_W + assert ival_T_end_of_week.asfreq("W") == ival_T_to_W + assert ival_T.asfreq("D") == ival_T_to_D + assert ival_T_end_of_day.asfreq("D") == ival_T_to_D + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_T.asfreq("B") == ival_T_to_B + assert ival_T_end_of_bus.asfreq("B") == ival_T_to_B + assert ival_T.asfreq("h") == ival_T_to_H + assert ival_T_end_of_hour.asfreq("h") == ival_T_to_H + + assert ival_T.asfreq("s", "s") == ival_T_to_S_start + assert ival_T.asfreq("s", "E") == ival_T_to_S_end + + assert ival_T.asfreq("Min") == ival_T + + def test_conv_secondly(self): + # frequency conversion tests: from Secondly Frequency" + + ival_S = Period(freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=0) + ival_S_end_of_year = Period( + freq="s", year=2007, month=12, day=31, hour=23, minute=59, second=59 + ) + ival_S_end_of_quarter = Period( + freq="s", year=2007, month=3, day=31, hour=23, minute=59, second=59 + ) + ival_S_end_of_month = Period( + freq="s", year=2007, month=1, day=31, hour=23, minute=59, second=59 + ) + ival_S_end_of_week = Period( + freq="s", year=2007, month=1, day=7, hour=23, minute=59, second=59 + ) + ival_S_end_of_day = Period( + freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59 + ) + ival_S_end_of_bus = Period( + freq="s", year=2007, month=1, day=1, hour=23, minute=59, second=59 + ) + ival_S_end_of_hour = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=59, second=59 + ) + ival_S_end_of_minute = Period( + freq="s", year=2007, month=1, day=1, hour=0, minute=0, second=59 + ) + + ival_S_to_A = Period(freq="Y", year=2007) + ival_S_to_Q = Period(freq="Q", year=2007, quarter=1) + ival_S_to_M = Period(freq="M", year=2007, month=1) + ival_S_to_W = Period(freq="W", year=2007, month=1, day=7) + ival_S_to_D = Period(freq="D", year=2007, month=1, day=1) + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + ival_S_to_B = Period(freq="B", year=2007, month=1, day=1) + ival_S_to_H = Period(freq="h", year=2007, month=1, day=1, hour=0) + ival_S_to_T = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0) + + assert ival_S.asfreq("Y") == ival_S_to_A + assert ival_S_end_of_year.asfreq("Y") == ival_S_to_A + assert ival_S.asfreq("Q") == ival_S_to_Q + assert ival_S_end_of_quarter.asfreq("Q") == ival_S_to_Q + assert ival_S.asfreq("M") == ival_S_to_M + assert ival_S_end_of_month.asfreq("M") == ival_S_to_M + assert ival_S.asfreq("W") == ival_S_to_W + assert ival_S_end_of_week.asfreq("W") == ival_S_to_W + assert ival_S.asfreq("D") == ival_S_to_D + assert ival_S_end_of_day.asfreq("D") == ival_S_to_D + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + assert ival_S.asfreq("B") == ival_S_to_B + assert ival_S_end_of_bus.asfreq("B") == ival_S_to_B + assert ival_S.asfreq("h") == ival_S_to_H + assert ival_S_end_of_hour.asfreq("h") == ival_S_to_H + assert ival_S.asfreq("Min") == ival_S_to_T + assert ival_S_end_of_minute.asfreq("Min") == ival_S_to_T + + assert ival_S.asfreq("s") == ival_S + + def test_conv_microsecond(self): + # GH#31475 Avoid floating point errors dropping the start_time to + # before the beginning of the Period + per = Period("2020-01-30 15:57:27.576166", freq="us") + assert per.ordinal == 1580399847576166 + + start = per.start_time + 
expected = Timestamp("2020-01-30 15:57:27.576166") + assert start == expected + assert start._value == per.ordinal * 1000 + + per2 = Period("2300-01-01", "us") + msg = "2300-01-01" + with pytest.raises(OutOfBoundsDatetime, match=msg): + per2.start_time + with pytest.raises(OutOfBoundsDatetime, match=msg): + per2.end_time + + def test_asfreq_mult(self): + # normal freq to mult freq + p = Period(freq="Y", year=2007) + # ordinal will not change + for freq in ["3Y", offsets.YearEnd(3)]: + result = p.asfreq(freq) + expected = Period("2007", freq="3Y") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + # ordinal will not change + for freq in ["3Y", offsets.YearEnd(3)]: + result = p.asfreq(freq, how="S") + expected = Period("2007", freq="3Y") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + # mult freq to normal freq + p = Period(freq="3Y", year=2007) + # ordinal will change because how=E is the default + for freq in ["Y", offsets.YearEnd()]: + result = p.asfreq(freq) + expected = Period("2009", freq="Y") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + # ordinal will not change + for freq in ["Y", offsets.YearEnd()]: + result = p.asfreq(freq, how="s") + expected = Period("2007", freq="Y") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + p = Period(freq="Y", year=2007) + for freq in ["2M", offsets.MonthEnd(2)]: + result = p.asfreq(freq) + expected = Period("2007-12", freq="2M") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + for freq in ["2M", offsets.MonthEnd(2)]: + result = p.asfreq(freq, how="s") + expected = Period("2007-01", freq="2M") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + p = Period(freq="3Y", year=2007) + for freq in ["2M", offsets.MonthEnd(2)]: + result = p.asfreq(freq) + expected = Period("2009-12", freq="2M") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + for freq in ["2M", offsets.MonthEnd(2)]: + result = p.asfreq(freq, how="s") + expected = Period("2007-01", freq="2M") + + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + def test_asfreq_combined(self): + # normal freq to combined freq + p = Period("2007", freq="h") + + # ordinal will not change + expected = Period("2007", freq="25h") + for freq, how in zip(["1D1h", "1h1D"], ["E", "S"]): + result = p.asfreq(freq, how=how) + assert result == expected + assert result.ordinal == expected.ordinal + assert result.freq == expected.freq + + # combined freq to normal freq + p1 = Period(freq="1D1h", year=2007) + p2 = Period(freq="1h1D", year=2007) + + # ordinal will change because how=E is the default + result1 = p1.asfreq("h") + result2 = p2.asfreq("h") + expected = Period("2007-01-02", freq="h") + assert result1 == expected + assert result1.ordinal == expected.ordinal + assert result1.freq == expected.freq + assert result2 == expected + assert result2.ordinal == expected.ordinal + assert result2.freq == expected.freq + + # ordinal will not change + result1 = p1.asfreq("h", how="S") + result2 = p2.asfreq("h", how="S") + expected = Period("2007-01-01", freq="h") + assert result1 == expected + assert result1.ordinal == 
expected.ordinal + assert result1.freq == expected.freq + assert result2 == expected + assert result2.ordinal == expected.ordinal + assert result2.freq == expected.freq + + def test_asfreq_MS(self): + initial = Period("2013") + + assert initial.asfreq(freq="M", how="S") == Period("2013-01", "M") + + msg = "MS is not supported as period frequency" + with pytest.raises(ValueError, match=msg): + initial.asfreq(freq="MS", how="S") + + with pytest.raises(ValueError, match=msg): + Period("2013-01", "MS") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/test_period.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/test_period.py new file mode 100644 index 0000000000000000000000000000000000000000..2c3a0816737fccba83939c06238ec28a83550750 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/scalar/period/test_period.py @@ -0,0 +1,1154 @@ +from datetime import ( + date, + datetime, + timedelta, +) +import re + +import numpy as np +import pytest + +from pandas._libs.tslibs import iNaT +from pandas._libs.tslibs.ccalendar import ( + DAYS, + MONTHS, +) +from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime +from pandas._libs.tslibs.parsing import DateParseError +from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG + +from pandas import ( + NaT, + Period, + Timedelta, + Timestamp, + offsets, +) +import pandas._testing as tm + +bday_msg = "Period with BDay freq is deprecated" + + +class TestPeriodDisallowedFreqs: + @pytest.mark.parametrize( + "freq, freq_msg", + [ + (offsets.BYearBegin(), "BYearBegin"), + (offsets.YearBegin(2), "YearBegin"), + (offsets.QuarterBegin(startingMonth=12), "QuarterBegin"), + (offsets.BusinessMonthEnd(2), "BusinessMonthEnd"), + ], + ) + def test_offsets_not_supported(self, freq, freq_msg): + # GH#55785 + msg = re.escape(f"{freq} is not supported as period frequency") + with pytest.raises(ValueError, match=msg): + Period(year=2014, freq=freq) + + def test_custom_business_day_freq_raises(self): + # GH#52534 + msg = "C is not supported as period frequency" + with pytest.raises(ValueError, match=msg): + Period("2023-04-10", freq="C") + msg = f"{offsets.CustomBusinessDay().base} is not supported as period frequency" + with pytest.raises(ValueError, match=msg): + Period("2023-04-10", freq=offsets.CustomBusinessDay()) + + def test_invalid_frequency_error_message(self): + msg = "WOM-1MON is not supported as period frequency" + with pytest.raises(ValueError, match=msg): + Period("2012-01-02", freq="WOM-1MON") + + def test_invalid_frequency_period_error_message(self): + msg = "for Period, please use 'M' instead of 'ME'" + with pytest.raises(ValueError, match=msg): + Period("2012-01-02", freq="ME") + + +class TestPeriodConstruction: + def test_from_td64nat_raises(self): + # GH#44507 + td = NaT.to_numpy("m8[ns]") + + msg = "Value must be Period, string, integer, or datetime" + with pytest.raises(ValueError, match=msg): + Period(td) + + with pytest.raises(ValueError, match=msg): + Period(td, freq="D") + + def test_construction(self): + i1 = Period("1/1/2005", freq="M") + i2 = Period("Jan 2005") + + assert i1 == i2 + + # GH#54105 - Period can be confusingly instantiated with lowercase freq + # TODO: raise in the future an error when passing lowercase freq + i1 = Period("2005", freq="Y") + i2 = Period("2005") + + assert i1 == i2 + + i4 = Period("2005", freq="M") + assert i1 != i4 + + i1 = Period.now(freq="Q") + i2 = Period(datetime.now(), freq="Q") + + assert i1 == i2 + + # Pass in freq as a keyword 
argument sometimes as a test for + # https://github.com/pandas-dev/pandas/issues/53369 + i1 = Period.now(freq="D") + i2 = Period(datetime.now(), freq="D") + i3 = Period.now(offsets.Day()) + + assert i1 == i2 + assert i1 == i3 + + i1 = Period("1982", freq="min") + msg = "'MIN' is deprecated and will be removed in a future version." + with tm.assert_produces_warning(FutureWarning, match=msg): + i2 = Period("1982", freq="MIN") + assert i1 == i2 + + i1 = Period(year=2005, month=3, day=1, freq="D") + i2 = Period("3/1/2005", freq="D") + assert i1 == i2 + + i3 = Period(year=2005, month=3, day=1, freq="d") + assert i1 == i3 + + i1 = Period("2007-01-01 09:00:00.001") + expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="ms") + assert i1 == expected + + expected = Period("2007-01-01 09:00:00.001", freq="ms") + assert i1 == expected + + i1 = Period("2007-01-01 09:00:00.00101") + expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="us") + assert i1 == expected + + expected = Period("2007-01-01 09:00:00.00101", freq="us") + assert i1 == expected + + msg = "Must supply freq for ordinal value" + with pytest.raises(ValueError, match=msg): + Period(ordinal=200701) + + msg = "Invalid frequency: X" + with pytest.raises(ValueError, match=msg): + Period("2007-1-1", freq="X") + + def test_tuple_freq_disallowed(self): + # GH#34703 tuple freq disallowed + with pytest.raises(TypeError, match="pass as a string instead"): + Period("1982", freq=("Min", 1)) + + with pytest.raises(TypeError, match="pass as a string instead"): + Period("2006-12-31", ("w", 1)) + + def test_construction_from_timestamp_nanos(self): + # GH#46811 don't drop nanos from Timestamp + ts = Timestamp("2022-04-20 09:23:24.123456789") + per = Period(ts, freq="ns") + + # should losslessly round-trip, not lose the 789 + rt = per.to_timestamp() + assert rt == ts + + # same thing but from a datetime64 object + dt64 = ts.asm8 + per2 = Period(dt64, freq="ns") + rt2 = per2.to_timestamp() + assert rt2.asm8 == dt64 + + def test_construction_bday(self): + # Biz day construction, roll forward if non-weekday + with tm.assert_produces_warning(FutureWarning, match=bday_msg): + i1 = Period("3/10/12", freq="B") + i2 = Period("3/10/12", freq="D") + assert i1 == i2.asfreq("B") + i2 = Period("3/11/12", freq="D") + assert i1 == i2.asfreq("B") + i2 = Period("3/12/12", freq="D") + assert i1 == i2.asfreq("B") + + i3 = Period("3/10/12", freq="b") + assert i1 == i3 + + i1 = Period(year=2012, month=3, day=10, freq="B") + i2 = Period("3/12/12", freq="B") + assert i1 == i2 + + def test_construction_quarter(self): + i1 = Period(year=2005, quarter=1, freq="Q") + i2 = Period("1/1/2005", freq="Q") + assert i1 == i2 + + i1 = Period(year=2005, quarter=3, freq="Q") + i2 = Period("9/1/2005", freq="Q") + assert i1 == i2 + + i1 = Period("2005Q1") + i2 = Period(year=2005, quarter=1, freq="Q") + i3 = Period("2005q1") + assert i1 == i2 + assert i1 == i3 + + i1 = Period("05Q1") + assert i1 == i2 + lower = Period("05q1") + assert i1 == lower + + i1 = Period("1Q2005") + assert i1 == i2 + lower = Period("1q2005") + assert i1 == lower + + i1 = Period("1Q05") + assert i1 == i2 + lower = Period("1q05") + assert i1 == lower + + i1 = Period("4Q1984") + assert i1.year == 1984 + lower = Period("4q1984") + assert i1 == lower + + def test_construction_month(self): + expected = Period("2007-01", freq="M") + i1 = Period("200701", freq="M") + assert i1 == expected + + i1 = Period("200701", freq="M") + assert i1 == expected + + i1 = Period(200701, freq="M") + assert i1 == expected + + i1 = 
Period(ordinal=200701, freq="M")
+        assert i1.year == 18695
+
+        i1 = Period(datetime(2007, 1, 1), freq="M")
+        i2 = Period("200701", freq="M")
+        assert i1 == i2
+
+        i1 = Period(date(2007, 1, 1), freq="M")
+        i2 = Period(datetime(2007, 1, 1), freq="M")
+        i3 = Period(np.datetime64("2007-01-01"), freq="M")
+        i4 = Period("2007-01-01 00:00:00", freq="M")
+        i5 = Period("2007-01-01 00:00:00.000", freq="M")
+        assert i1 == i2
+        assert i1 == i3
+        assert i1 == i4
+        assert i1 == i5
+
+    def test_period_constructor_offsets(self):
+        assert Period("1/1/2005", freq=offsets.MonthEnd()) == Period(
+            "1/1/2005", freq="M"
+        )
+        assert Period("2005", freq=offsets.YearEnd()) == Period("2005", freq="Y")
+        assert Period("2005", freq=offsets.MonthEnd()) == Period("2005", freq="M")
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            assert Period("3/10/12", freq=offsets.BusinessDay()) == Period(
+                "3/10/12", freq="B"
+            )
+        assert Period("3/10/12", freq=offsets.Day()) == Period("3/10/12", freq="D")
+
+        assert Period(
+            year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)
+        ) == Period(year=2005, quarter=1, freq="Q")
+        assert Period(
+            year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)
+        ) == Period(year=2005, quarter=2, freq="Q")
+
+        assert Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(
+            year=2005, month=3, day=1, freq="D"
+        )
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            assert Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(
+                year=2012, month=3, day=10, freq="B"
+            )
+
+        expected = Period("2005-03-01", freq="3D")
+        assert Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected
+        assert Period(year=2005, month=3, day=1, freq="3D") == expected
+
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            assert Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(
+                year=2012, month=3, day=10, freq="3B"
+            )
+
+        assert Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq="M")
+
+        i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
+        i2 = Period(ordinal=200701, freq="M")
+        assert i1 == i2
+        assert i1.year == 18695
+        assert i2.year == 18695
+
+        i1 = Period(datetime(2007, 1, 1), freq="M")
+        i2 = Period("200701", freq="M")
+        assert i1 == i2
+
+        i1 = Period(date(2007, 1, 1), freq="M")
+        i2 = Period(datetime(2007, 1, 1), freq="M")
+        i3 = Period(np.datetime64("2007-01-01"), freq="M")
+        i4 = Period("2007-01-01 00:00:00", freq="M")
+        i5 = Period("2007-01-01 00:00:00.000", freq="M")
+        assert i1 == i2
+        assert i1 == i3
+        assert i1 == i4
+        assert i1 == i5
+
+        i1 = Period("2007-01-01 09:00:00.001")
+        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="ms")
+        assert i1 == expected
+
+        expected = Period("2007-01-01 09:00:00.001", freq="ms")
+        assert i1 == expected
+
+        i1 = Period("2007-01-01 09:00:00.00101")
+        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="us")
+        assert i1 == expected
+
+        expected = Period("2007-01-01 09:00:00.00101", freq="us")
+        assert i1 == expected
+
+    def test_invalid_arguments(self):
+        msg = "Must supply freq for datetime value"
+        with pytest.raises(ValueError, match=msg):
+            Period(datetime.now())
+        with pytest.raises(ValueError, match=msg):
+            Period(datetime.now().date())
+
+        msg = "Value must be Period, string, integer, or datetime"
+        with pytest.raises(ValueError, match=msg):
+            Period(1.6, freq="D")
+        msg = "Ordinal must be an integer"
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=1.6, freq="D")
+        msg = "Only value or ordinal but not both should be given but not both"
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=2, value=1, freq="D")
+
+        msg = "If value is None, freq cannot be None"
+        with pytest.raises(ValueError, match=msg):
+            Period(month=1)
+
+        msg = '^Given date string "-2000" not likely a datetime$'
+        with pytest.raises(ValueError, match=msg):
+            Period("-2000", "Y")
+        msg = "day is out of range for month"
+        with pytest.raises(DateParseError, match=msg):
+            Period("0", "Y")
+        msg = "Unknown datetime string format, unable to parse"
+        with pytest.raises(DateParseError, match=msg):
+            Period("1/1/-2000", "Y")
+
+    def test_constructor_corner(self):
+        expected = Period("2007-01", freq="2M")
+        assert Period(year=2007, month=1, freq="2M") == expected
+
+        assert Period(None) is NaT
+
+        p = Period("2007-01-01", freq="D")
+
+        result = Period(p, freq="Y")
+        exp = Period("2007", freq="Y")
+        assert result == exp
+
+    def test_constructor_infer_freq(self):
+        p = Period("2007-01-01")
+        assert p.freq == "D"
+
+        p = Period("2007-01-01 07")
+        assert p.freq == "h"
+
+        p = Period("2007-01-01 07:10")
+        assert p.freq == "min"
+
+        p = Period("2007-01-01 07:10:15")
+        assert p.freq == "s"
+
+        p = Period("2007-01-01 07:10:15.123")
+        assert p.freq == "ms"
+
+        # We see that there are 6 digits after the decimal, so get microsecond
+        # even though they are all zeros.
+        p = Period("2007-01-01 07:10:15.123000")
+        assert p.freq == "us"
+
+        p = Period("2007-01-01 07:10:15.123400")
+        assert p.freq == "us"
+
+    def test_multiples(self):
+        result1 = Period("1989", freq="2Y")
+        result2 = Period("1989", freq="Y")
+        assert result1.ordinal == result2.ordinal
+        assert result1.freqstr == "2Y-DEC"
+        assert result2.freqstr == "Y-DEC"
+        assert result1.freq == offsets.YearEnd(2)
+        assert result2.freq == offsets.YearEnd()
+
+        assert (result1 + 1).ordinal == result1.ordinal + 2
+        assert (1 + result1).ordinal == result1.ordinal + 2
+        assert (result1 - 1).ordinal == result2.ordinal - 2
+        assert (-1 + result1).ordinal == result2.ordinal - 2
+
+    @pytest.mark.parametrize("month", MONTHS)
+    def test_period_cons_quarterly(self, month):
+        # bugs in scikits.timeseries
+        freq = f"Q-{month}"
+        exp = Period("1989Q3", freq=freq)
+        assert "1989Q3" in str(exp)
+        stamp = exp.to_timestamp("D", how="end")
+        p = Period(stamp, freq=freq)
+        assert p == exp
+
+        stamp = exp.to_timestamp("3D", how="end")
+        p = Period(stamp, freq=freq)
+        assert p == exp
+
+    @pytest.mark.parametrize("month", MONTHS)
+    def test_period_cons_annual(self, month):
+        # bugs in scikits.timeseries
+        freq = f"Y-{month}"
+        exp = Period("1989", freq=freq)
+        stamp = exp.to_timestamp("D", how="end") + timedelta(days=30)
+        p = Period(stamp, freq=freq)
+
+        assert p == exp + 1
+        assert isinstance(p, Period)
+
+    @pytest.mark.parametrize("day", DAYS)
+    @pytest.mark.parametrize("num", range(10, 17))
+    def test_period_cons_weekly(self, num, day):
+        daystr = f"2011-02-{num}"
+        freq = f"W-{day}"
+
+        result = Period(daystr, freq=freq)
+        expected = Period(daystr, freq="D").asfreq(freq)
+        assert result == expected
+        assert isinstance(result, Period)
+
+    def test_parse_week_str_roundstrip(self):
+        # GH#50803
+        per = Period("2017-01-23/2017-01-29")
+        assert per.freq.freqstr == "W-SUN"
+
+        per = Period("2017-01-24/2017-01-30")
+        assert per.freq.freqstr == "W-MON"
+
+        msg = "Could not parse as weekly-freq Period"
+        with pytest.raises(ValueError, match=msg):
+            # not 6 days apart
+            Period("2016-01-23/2017-01-29")
+
+    def test_period_from_ordinal(self):
+        p = Period("2011-01", freq="M")
+        res = Period._from_ordinal(p.ordinal, freq=p.freq)
+        assert p == res
+        assert isinstance(res, Period)
+
+    @pytest.mark.parametrize("freq", ["Y", "M", "D", "h"])
+    def test_construct_from_nat_string_and_freq(self, freq):
+        per = Period("NaT", freq=freq)
+        assert per is NaT
+
+        per = Period("NaT", freq="2" + freq)
+        assert per is NaT
+
+        per = Period("NaT", freq="3" + freq)
+        assert per is NaT
+
+    def test_period_cons_nat(self):
+        p = Period("nat", freq="W-SUN")
+        assert p is NaT
+
+        p = Period(iNaT, freq="D")
+        assert p is NaT
+
+        p = Period(iNaT, freq="3D")
+        assert p is NaT
+
+        p = Period(iNaT, freq="1D1h")
+        assert p is NaT
+
+        p = Period("NaT")
+        assert p is NaT
+
+        p = Period(iNaT)
+        assert p is NaT
+
+    def test_period_cons_mult(self):
+        p1 = Period("2011-01", freq="3M")
+        p2 = Period("2011-01", freq="M")
+        assert p1.ordinal == p2.ordinal
+
+        assert p1.freq == offsets.MonthEnd(3)
+        assert p1.freqstr == "3M"
+
+        assert p2.freq == offsets.MonthEnd()
+        assert p2.freqstr == "M"
+
+        result = p1 + 1
+        assert result.ordinal == (p2 + 3).ordinal
+
+        assert result.freq == p1.freq
+        assert result.freqstr == "3M"
+
+        result = p1 - 1
+        assert result.ordinal == (p2 - 3).ordinal
+        assert result.freq == p1.freq
+        assert result.freqstr == "3M"
+
+        msg = "Frequency must be positive, because it represents span: -3M"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="-3M")
+
+        msg = "Frequency must be positive, because it represents span: 0M"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="0M")
+
+    def test_period_cons_combined(self):
+        p = [
+            (
+                Period("2011-01", freq="1D1h"),
+                Period("2011-01", freq="1h1D"),
+                Period("2011-01", freq="h"),
+            ),
+            (
+                Period(ordinal=1, freq="1D1h"),
+                Period(ordinal=1, freq="1h1D"),
+                Period(ordinal=1, freq="h"),
+            ),
+        ]
+
+        for p1, p2, p3 in p:
+            assert p1.ordinal == p3.ordinal
+            assert p2.ordinal == p3.ordinal
+
+            assert p1.freq == offsets.Hour(25)
+            assert p1.freqstr == "25h"
+
+            assert p2.freq == offsets.Hour(25)
+            assert p2.freqstr == "25h"
+
+            assert p3.freq == offsets.Hour()
+            assert p3.freqstr == "h"
+
+            result = p1 + 1
+            assert result.ordinal == (p3 + 25).ordinal
+            assert result.freq == p1.freq
+            assert result.freqstr == "25h"
+
+            result = p2 + 1
+            assert result.ordinal == (p3 + 25).ordinal
+            assert result.freq == p2.freq
+            assert result.freqstr == "25h"
+
+            result = p1 - 1
+            assert result.ordinal == (p3 - 25).ordinal
+            assert result.freq == p1.freq
+            assert result.freqstr == "25h"
+
+            result = p2 - 1
+            assert result.ordinal == (p3 - 25).ordinal
+            assert result.freq == p2.freq
+            assert result.freqstr == "25h"
+
+        msg = "Frequency must be positive, because it represents span: -25h"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="-1D1h")
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="-1h1D")
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=1, freq="-1D1h")
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=1, freq="-1h1D")
+
+        msg = "Frequency must be positive, because it represents span: 0D"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="0D0h")
+        with pytest.raises(ValueError, match=msg):
+            Period(ordinal=1, freq="0D0h")
+
+        # You can only combine together day and intraday offsets
+        msg = "Invalid frequency: 1W1D"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="1W1D")
+        msg = "Invalid frequency: 1D1W"
+        with pytest.raises(ValueError, match=msg):
+            Period("2011-01", freq="1D1W")
+
+    @pytest.mark.parametrize("day", ["1970/01/01 ", "2020-12-31 ", "1981/09/13 "])
+    @pytest.mark.parametrize("hour", ["00:00:00", "00:00:01", "23:59:59", "12:00:59"])
+    @pytest.mark.parametrize(
+        "sec_float, expected",
+        [
+            (".000000001", 1),
+            (".000000999", 999),
+            (".123456789", 789),
+            (".999999999", 999),
+            (".999999000", 0),
+            # Test femtoseconds, attoseconds, picoseconds are dropped like Timestamp
+            (".999999001123", 1),
+            (".999999001123456", 1),
+            (".999999001123456789", 1),
+        ],
+    )
+    def test_period_constructor_nanosecond(self, day, hour, sec_float, expected):
+        # GH 34621
+
+        assert Period(day + hour + sec_float).start_time.nanosecond == expected
+
+    @pytest.mark.parametrize("hour", range(24))
+    def test_period_large_ordinal(self, hour):
+        # Issue #36430
+        # Integer overflow for Period over the maximum timestamp
+        p = Period(ordinal=2562048 + hour, freq="1h")
+        assert p.hour == hour
+
+
+class TestPeriodMethods:
+    def test_round_trip(self):
+        p = Period("2000Q1")
+        new_p = tm.round_trip_pickle(p)
+        assert new_p == p
+
+    def test_hash(self):
+        assert hash(Period("2011-01", freq="M")) == hash(Period("2011-01", freq="M"))
+
+        assert hash(Period("2011-01-01", freq="D")) != hash(Period("2011-01", freq="M"))
+
+        assert hash(Period("2011-01", freq="3M")) != hash(Period("2011-01", freq="2M"))
+
+        assert hash(Period("2011-01", freq="M")) != hash(Period("2011-02", freq="M"))
+
+    # --------------------------------------------------------------
+    # to_timestamp
+
+    def test_to_timestamp_mult(self):
+        p = Period("2011-01", freq="M")
+        assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
+        expected = Timestamp("2011-02-01") - Timedelta(1, "ns")
+        assert p.to_timestamp(how="E") == expected
+
+        p = Period("2011-01", freq="3M")
+        assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
+        expected = Timestamp("2011-04-01") - Timedelta(1, "ns")
+        assert p.to_timestamp(how="E") == expected
+
+    @pytest.mark.filterwarnings(
+        "ignore:Period with BDay freq is deprecated:FutureWarning"
+    )
+    def test_to_timestamp(self):
+        p = Period("1982", freq="Y")
+        start_ts = p.to_timestamp(how="S")
+        aliases = ["s", "StarT", "BEGIn"]
+        for a in aliases:
+            assert start_ts == p.to_timestamp("D", how=a)
+            # freq with mult should not affect the result
+            assert start_ts == p.to_timestamp("3D", how=a)
+
+        end_ts = p.to_timestamp(how="E")
+        aliases = ["e", "end", "FINIsH"]
+        for a in aliases:
+            assert end_ts == p.to_timestamp("D", how=a)
+            assert end_ts == p.to_timestamp("3D", how=a)
+
+        from_lst = ["Y", "Q", "M", "W", "B", "D", "h", "Min", "s"]
+
+        def _ex(p):
+            if p.freq == "B":
+                return p.start_time + Timedelta(days=1, nanoseconds=-1)
+            return Timestamp((p + p.freq).start_time._value - 1)
+
+        for fcode in from_lst:
+            p = Period("1982", freq=fcode)
+            result = p.to_timestamp().to_period(fcode)
+            assert result == p
+
+            assert p.start_time == p.to_timestamp(how="S")
+
+            assert p.end_time == _ex(p)
+
+        # Frequency other than daily
+
+        p = Period("1985", freq="Y")
+
+        result = p.to_timestamp("h", how="end")
+        expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
+        assert result == expected
+        result = p.to_timestamp("3h", how="end")
+        assert result == expected
+
+        result = p.to_timestamp("min", how="end")
+        expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
+        assert result == expected
+        result = p.to_timestamp("2min", how="end")
+        assert result == expected
+
+        result = p.to_timestamp(how="end")
+        expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
+        assert result == expected
+
+        expected = datetime(1985, 1, 1)
+        result = p.to_timestamp("h", how="start")
+        assert result == expected
+        result = p.to_timestamp("min", how="start")
+        assert result == expected
+        result = p.to_timestamp("s", how="start")
+        assert result == expected
+        result = p.to_timestamp("3h", how="start")
+        assert result == expected
+        result = p.to_timestamp("5s", how="start")
+        assert result == expected
+
+    def test_to_timestamp_business_end(self):
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            per = Period("1990-01-05", "B")  # Friday
+            result = per.to_timestamp("B", how="E")
+
+        expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1)
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "ts, expected",
+        [
+            ("1970-01-01 00:00:00", 0),
+            ("1970-01-01 00:00:00.000001", 1),
+            ("1970-01-01 00:00:00.00001", 10),
+            ("1970-01-01 00:00:00.499", 499000),
+            ("1999-12-31 23:59:59.999", 999000),
+            ("1999-12-31 23:59:59.999999", 999999),
+            ("2050-12-31 23:59:59.5", 500000),
+            ("2050-12-31 23:59:59.500001", 500001),
+            ("2050-12-31 23:59:59.123456", 123456),
+        ],
+    )
+    @pytest.mark.parametrize("freq", [None, "us", "ns"])
+    def test_to_timestamp_microsecond(self, ts, expected, freq):
+        # GH 24444
+        result = Period(ts).to_timestamp(freq=freq).microsecond
+        assert result == expected
+
+    # --------------------------------------------------------------
+    # Rendering: __repr__, strftime, etc
+
+    @pytest.mark.parametrize(
+        "str_ts,freq,str_res,str_freq",
+        (
+            ("Jan-2000", None, "2000-01", "M"),
+            ("2000-12-15", None, "2000-12-15", "D"),
+            (
+                "2000-12-15 13:45:26.123456789",
+                "ns",
+                "2000-12-15 13:45:26.123456789",
+                "ns",
+            ),
+            ("2000-12-15 13:45:26.123456789", "us", "2000-12-15 13:45:26.123456", "us"),
+            ("2000-12-15 13:45:26.123456", None, "2000-12-15 13:45:26.123456", "us"),
+            ("2000-12-15 13:45:26.123456789", "ms", "2000-12-15 13:45:26.123", "ms"),
+            ("2000-12-15 13:45:26.123", None, "2000-12-15 13:45:26.123", "ms"),
+            ("2000-12-15 13:45:26", "s", "2000-12-15 13:45:26", "s"),
+            ("2000-12-15 13:45:26", "min", "2000-12-15 13:45", "min"),
+            ("2000-12-15 13:45:26", "h", "2000-12-15 13:00", "h"),
+            ("2000-12-15", "Y", "2000", "Y-DEC"),
+            ("2000-12-15", "Q", "2000Q4", "Q-DEC"),
+            ("2000-12-15", "M", "2000-12", "M"),
+            ("2000-12-15", "W", "2000-12-11/2000-12-17", "W-SUN"),
+            ("2000-12-15", "D", "2000-12-15", "D"),
+            ("2000-12-15", "B", "2000-12-15", "B"),
+        ),
+    )
+    @pytest.mark.filterwarnings(
+        "ignore:Period with BDay freq is deprecated:FutureWarning"
+    )
+    def test_repr(self, str_ts, freq, str_res, str_freq):
+        p = Period(str_ts, freq=freq)
+        assert str(p) == str_res
+        assert repr(p) == f"Period('{str_res}', '{str_freq}')"
+
+    def test_repr_nat(self):
+        p = Period("nat", freq="M")
+        assert repr(NaT) in repr(p)
+
+    def test_strftime(self):
+        # GH#3363
+        p = Period("2000-1-1 12:34:12", freq="s")
+        res = p.strftime("%Y-%m-%d %H:%M:%S")
+        assert res == "2000-01-01 12:34:12"
+        assert isinstance(res, str)
+
+
+class TestPeriodProperties:
+    """Test properties such as year, month, weekday, etc...."""
+
+    @pytest.mark.parametrize("freq", ["Y", "M", "D", "h"])
+    def test_is_leap_year(self, freq):
+        # GH 13727
+        p = Period("2000-01-01 00:00:00", freq=freq)
+        assert p.is_leap_year
+        assert isinstance(p.is_leap_year, bool)
+
+        p = Period("1999-01-01 00:00:00", freq=freq)
+        assert not p.is_leap_year
+
+        p = Period("2004-01-01 00:00:00", freq=freq)
+        assert p.is_leap_year
+
+        p = Period("2100-01-01 00:00:00", freq=freq)
+        assert not p.is_leap_year
+
+    def test_quarterly_negative_ordinals(self):
+        p = Period(ordinal=-1, freq="Q-DEC")
+        assert p.year == 1969
+        assert p.quarter == 4
+        assert isinstance(p, Period)
+
+        p = Period(ordinal=-2, freq="Q-DEC")
+        assert p.year == 1969
+        assert p.quarter == 3
+        assert isinstance(p, Period)
+
+        p = Period(ordinal=-2, freq="M")
+        assert p.year == 1969
+        assert p.month == 11
+        assert isinstance(p, Period)
+
+    def test_freq_str(self):
+        i1 = Period("1982", freq="Min")
+        assert i1.freq == offsets.Minute()
+        assert i1.freqstr == "min"
+
+    @pytest.mark.filterwarnings(
+        "ignore:Period with BDay freq is deprecated:FutureWarning"
+    )
+    def test_period_deprecated_freq(self):
+        cases = {
+            "M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
+            "B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
+            "D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
+            "h": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
+            "min": ["minute", "MINUTE", "MINUTELY", "minutely"],
+            "s": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
+            "ms": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
+            "us": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
+            "ns": ["NANOSECOND", "NANOSECONDLY", "nanosecond"],
+        }
+
+        msg = INVALID_FREQ_ERR_MSG
+        for exp, freqs in cases.items():
+            for freq in freqs:
+                with pytest.raises(ValueError, match=msg):
+                    Period("2016-03-01 09:00", freq=freq)
+                with pytest.raises(ValueError, match=msg):
+                    Period(ordinal=1, freq=freq)
+
+            # check that supported freq-aliases still work
+            p1 = Period("2016-03-01 09:00", freq=exp)
+            p2 = Period(ordinal=1, freq=exp)
+            assert isinstance(p1, Period)
+            assert isinstance(p2, Period)
+
+    @staticmethod
+    def _period_constructor(bound, offset):
+        return Period(
+            year=bound.year,
+            month=bound.month,
+            day=bound.day,
+            hour=bound.hour,
+            minute=bound.minute,
+            second=bound.second + offset,
+            freq="us",
+        )
+
+    @pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)])
+    @pytest.mark.parametrize("period_property", ["start_time", "end_time"])
+    def test_outer_bounds_start_and_end_time(self, bound, offset, period_property):
+        # GH #13346
+        period = TestPeriodProperties._period_constructor(bound, offset)
+        with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond"):
+            getattr(period, period_property)
+
+    @pytest.mark.parametrize("bound, offset", [(Timestamp.min, -1), (Timestamp.max, 1)])
+    @pytest.mark.parametrize("period_property", ["start_time", "end_time"])
+    def test_inner_bounds_start_and_end_time(self, bound, offset, period_property):
+        # GH #13346
+        period = TestPeriodProperties._period_constructor(bound, -offset)
+        expected = period.to_timestamp().round(freq="s")
+        assert getattr(period, period_property).round(freq="s") == expected
+        expected = (bound - offset * Timedelta(1, unit="s")).floor("s")
+        assert getattr(period, period_property).floor("s") == expected
+
+    def test_start_time(self):
+        freq_lst = ["Y", "Q", "M", "D", "h", "min", "s"]
+        xp = datetime(2012, 1, 1)
+        for f in freq_lst:
+            p = Period("2012", freq=f)
+            assert p.start_time == xp
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            assert Period("2012", freq="B").start_time == datetime(2012, 1, 2)
+        assert Period("2012", freq="W").start_time == datetime(2011, 12, 26)
+
+    def test_end_time(self):
+        p = Period("2012", freq="Y")
+
+        def _ex(*args):
+            return Timestamp(Timestamp(datetime(*args)).as_unit("ns")._value - 1)
+
+        xp = _ex(2013, 1, 1)
+        assert xp == p.end_time
+
+        p = Period("2012", freq="Q")
+        xp = _ex(2012, 4, 1)
+        assert xp == p.end_time
+
+        p = Period("2012", freq="M")
+        xp = _ex(2012, 2, 1)
+        assert xp == p.end_time
+
+        p = Period("2012", freq="D")
+        xp = _ex(2012, 1, 2)
+        assert xp == p.end_time
+
+        p = Period("2012", freq="h")
+        xp = _ex(2012, 1, 1, 1)
+        assert xp == p.end_time
+
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            p = Period("2012", freq="B")
+            xp = _ex(2012, 1, 3)
+            assert xp == p.end_time
+
+        p = Period("2012", freq="W")
+        xp = _ex(2012, 1, 2)
+        assert xp == p.end_time
+
+        # Test for GH 11738
+        p = Period("2012", freq="15D")
+        xp = _ex(2012, 1, 16)
+        assert xp == p.end_time
+
+        p = Period("2012", freq="1D1h")
+        xp = _ex(2012, 1, 2, 1)
+        assert xp == p.end_time
+
+        p = Period("2012", freq="1h1D")
+        xp = _ex(2012, 1, 2, 1)
+        assert xp == p.end_time
+
+    def test_end_time_business_friday(self):
+        # GH#34449
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            per = Period("1990-01-05", "B")
+            result = per.end_time
+
+        expected = Timestamp("1990-01-06") - Timedelta(nanoseconds=1)
+        assert result == expected
+
+    def test_anchor_week_end_time(self):
+        def _ex(*args):
+            return Timestamp(Timestamp(datetime(*args)).as_unit("ns")._value - 1)
+
+        p = Period("2013-1-1", "W-SAT")
+        xp = _ex(2013, 1, 6)
+        assert p.end_time == xp
+
+    def test_properties_annually(self):
+        # Test properties on Periods with annual frequency.
+        a_date = Period(freq="Y", year=2007)
+        assert a_date.year == 2007
+
+    def test_properties_quarterly(self):
+        # Test properties on Periods with quarterly frequency.
+        qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
+        qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
+        qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
+        #
+        for x in range(3):
+            for qd in (qedec_date, qejan_date, qejun_date):
+                assert (qd + x).qyear == 2007
+                assert (qd + x).quarter == x + 1
+
+    def test_properties_monthly(self):
+        # Test properties on Periods with monthly frequency.
+        m_date = Period(freq="M", year=2007, month=1)
+        for x in range(11):
+            m_ival_x = m_date + x
+            assert m_ival_x.year == 2007
+            if 1 <= x + 1 <= 3:
+                assert m_ival_x.quarter == 1
+            elif 4 <= x + 1 <= 6:
+                assert m_ival_x.quarter == 2
+            elif 7 <= x + 1 <= 9:
+                assert m_ival_x.quarter == 3
+            elif 10 <= x + 1 <= 12:
+                assert m_ival_x.quarter == 4
+            assert m_ival_x.month == x + 1
+
+    def test_properties_weekly(self):
+        # Test properties on Periods with weekly frequency.
+        w_date = Period(freq="W", year=2007, month=1, day=7)
+        #
+        assert w_date.year == 2007
+        assert w_date.quarter == 1
+        assert w_date.month == 1
+        assert w_date.week == 1
+        assert (w_date - 1).week == 52
+        assert w_date.days_in_month == 31
+        assert Period(freq="W", year=2012, month=2, day=1).days_in_month == 29
+
+    def test_properties_weekly_legacy(self):
+        # Test properties on Periods with weekly frequency.
+        w_date = Period(freq="W", year=2007, month=1, day=7)
+        assert w_date.year == 2007
+        assert w_date.quarter == 1
+        assert w_date.month == 1
+        assert w_date.week == 1
+        assert (w_date - 1).week == 52
+        assert w_date.days_in_month == 31
+
+        exp = Period(freq="W", year=2012, month=2, day=1)
+        assert exp.days_in_month == 29
+
+        msg = INVALID_FREQ_ERR_MSG
+        with pytest.raises(ValueError, match=msg):
+            Period(freq="WK", year=2007, month=1, day=7)
+
+    def test_properties_daily(self):
+        # Test properties on Periods with daily frequency.
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            b_date = Period(freq="B", year=2007, month=1, day=1)
+        #
+        assert b_date.year == 2007
+        assert b_date.quarter == 1
+        assert b_date.month == 1
+        assert b_date.day == 1
+        assert b_date.weekday == 0
+        assert b_date.dayofyear == 1
+        assert b_date.days_in_month == 31
+        with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+            assert Period(freq="B", year=2012, month=2, day=1).days_in_month == 29
+
+        d_date = Period(freq="D", year=2007, month=1, day=1)
+
+        assert d_date.year == 2007
+        assert d_date.quarter == 1
+        assert d_date.month == 1
+        assert d_date.day == 1
+        assert d_date.weekday == 0
+        assert d_date.dayofyear == 1
+        assert d_date.days_in_month == 31
+        assert Period(freq="D", year=2012, month=2, day=1).days_in_month == 29
+
+    def test_properties_hourly(self):
+        # Test properties on Periods with hourly frequency.
+        h_date1 = Period(freq="h", year=2007, month=1, day=1, hour=0)
+        h_date2 = Period(freq="2h", year=2007, month=1, day=1, hour=0)
+
+        for h_date in [h_date1, h_date2]:
+            assert h_date.year == 2007
+            assert h_date.quarter == 1
+            assert h_date.month == 1
+            assert h_date.day == 1
+            assert h_date.weekday == 0
+            assert h_date.dayofyear == 1
+            assert h_date.hour == 0
+            assert h_date.days_in_month == 31
+            assert (
+                Period(freq="h", year=2012, month=2, day=1, hour=0).days_in_month == 29
+            )
+
+    def test_properties_minutely(self):
+        # Test properties on Periods with minutely frequency.
+        t_date = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
+        #
+        assert t_date.quarter == 1
+        assert t_date.month == 1
+        assert t_date.day == 1
+        assert t_date.weekday == 0
+        assert t_date.dayofyear == 1
+        assert t_date.hour == 0
+        assert t_date.minute == 0
+        assert t_date.days_in_month == 31
+        assert (
+            Period(freq="D", year=2012, month=2, day=1, hour=0, minute=0).days_in_month
+            == 29
+        )
+
+    def test_properties_secondly(self):
+        # Test properties on Periods with secondly frequency.
+        s_date = Period(
+            freq="Min", year=2007, month=1, day=1, hour=0, minute=0, second=0
+        )
+        #
+        assert s_date.year == 2007
+        assert s_date.quarter == 1
+        assert s_date.month == 1
+        assert s_date.day == 1
+        assert s_date.weekday == 0
+        assert s_date.dayofyear == 1
+        assert s_date.hour == 0
+        assert s_date.minute == 0
+        assert s_date.second == 0
+        assert s_date.days_in_month == 31
+        assert (
+            Period(
+                freq="Min", year=2012, month=2, day=1, hour=0, minute=0, second=0
+            ).days_in_month
+            == 29
+        )
+
+
+class TestPeriodComparisons:
+    def test_sort_periods(self):
+        jan = Period("2000-01", "M")
+        feb = Period("2000-02", "M")
+        mar = Period("2000-03", "M")
+        periods = [mar, jan, feb]
+        correctPeriods = [jan, feb, mar]
+        assert sorted(periods) == correctPeriods
+
+
+def test_period_immutable():
+    # see gh-17116
+    msg = "not writable"
+
+    per = Period("2014Q1")
+    with pytest.raises(AttributeError, match=msg):
+        per.ordinal = 14
+
+    freq = per.freq
+    with pytest.raises(AttributeError, match=msg):
+        per.freq = 2 * freq
+
+
+def test_small_year_parsing():
+    per1 = Period("0001-01-07", "D")
+    assert per1.year == 1
+    assert per1.day == 7
+
+
+def test_negone_ordinals():
+    freqs = ["Y", "M", "Q", "D", "h", "min", "s"]
+
+    period = Period(ordinal=-1, freq="D")
+    for freq in freqs:
+        repr(period.asfreq(freq))
+
+    for freq in freqs:
+        period = Period(ordinal=-1, freq=freq)
+        repr(period)
+        assert period.year == 1969
+
+    with tm.assert_produces_warning(FutureWarning, match=bday_msg):
+        period = Period(ordinal=-1, freq="B")
+        repr(period)
+    period = Period(ordinal=-1, freq="W")
+    repr(period)