diff --git a/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d5709012c8b3217745263c869aed80117a251f26
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6101e0d4b6e9dbcf4502c22f6913f97780d8582dcf6d92a6b6fc812938b1745
+size 33555612
diff --git a/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c93eba8309f927b68f720a3074f509805b4fe047
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fc87ed0a360ddf81ffb1b2c0bc718db0ccbefc3fb97aba525779364579ee0f8
+size 33555533
diff --git a/ckpts/universal/global_step80/zero/9.attention.dense.weight/fp32.pt b/ckpts/universal/global_step80/zero/9.attention.dense.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e4370b3d6972a4e28e527678c81e44138b4da61
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/9.attention.dense.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d7b915fb8268f1ce18c079d5df8054171ae4bcb7f353381df39f7f21bf9bab9
+size 16778317
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52b37c01fff980e3e4e4755dc327756adad431fb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/conftest.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9869a4e247ce0d3aa4b7b11d9a255c2829174c1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/conftest.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_any_index.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_any_index.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b05aa2ee82a3fa9ea55dda808091745965593f2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_any_index.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6888f842033b4a140b7e823fbe625e00495b8a0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce1af2d83ffe76d87d2b1c05ab6d0485e9bc093a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_common.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_datetimelike.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_datetimelike.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bcfe85222193fc5e7da8399b56a770e89faaa418
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_datetimelike.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_engines.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_engines.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cdb50b7806f251b6f8ee115ae893ab9deef80ac4
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_engines.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_frozen.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_frozen.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f5edd49442945b7ce6f2bb67588d8d0e65b4688
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_frozen.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_index_new.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_index_new.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f22f7d7e2b93ced184b07548c6261a7c770033c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_index_new.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_indexing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fbb068d053d7abe205f3ca28ae58deb41945f3b3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_indexing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_numpy_compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_numpy_compat.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b9ab9d99db6eb411a42f8f536aafd412c5f248a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_numpy_compat.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_old_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_old_base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84ae1a2fccbc8154109498a2bf7f83fd05350bb9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_old_base.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_setops.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_setops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e2fb6aee7421df4b4922d556c746051d879a4f51
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_setops.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_subclass.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_subclass.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed5561e3dfaf1ac29fc26e5097dde9b931501f6e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/__pycache__/test_subclass.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b3888a0139f6f4a8820376afc9cbf0aed9cf1892
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_append.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_append.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2573c07e863a27637482c45c82238f81587cc0c1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_append.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_astype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_astype.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c8e7c3be35f82856e6a12c7bfb396f3eb16e173
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_astype.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_category.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_category.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..138235f8bc6028a23633ac63cd6b45f17648c5cf
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_category.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_constructors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..776dae37ee7bed83efd849967679e9944505fa1a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_constructors.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_equals.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_equals.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e51e9faf73afaf57aedeef36755109546d5ea75e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_equals.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_fillna.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_fillna.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4891f7785a8fe703ff09ed45e7056274d68f5297
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_fillna.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_formats.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_formats.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2125dd47102fac094d50f2c2c013bc8b2b5f17b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_formats.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_indexing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ae179ab4c437096ae84eb981c069beaee99c657
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_indexing.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_map.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_map.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2c4a6090bd9c87e23418e18dba26ff00b0d1183
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_map.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_reindex.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_reindex.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..403f470c20054adce93a602aadde73cc28b0afc7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_reindex.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_setops.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_setops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd453ea2ae2a36e0a44c596b3dd1b2511f3f9e47
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/__pycache__/test_setops.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_append.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_append.py
new file
mode 100644 index 0000000000000000000000000000000000000000..b48c3219f5111a7a1226d09ce4625c723c4168fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_append.py @@ -0,0 +1,62 @@ +import pytest + +from pandas import ( + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +class TestAppend: + @pytest.fixture + def ci(self): + categories = list("cab") + return CategoricalIndex(list("aabbca"), categories=categories, ordered=False) + + def test_append(self, ci): + # append cats with the same categories + result = ci[:3].append(ci[3:]) + tm.assert_index_equal(result, ci, exact=True) + + foos = [ci[:1], ci[1:3], ci[3:]] + result = foos[0].append(foos[1:]) + tm.assert_index_equal(result, ci, exact=True) + + def test_append_empty(self, ci): + # empty + result = ci.append([]) + tm.assert_index_equal(result, ci, exact=True) + + def test_append_mismatched_categories(self, ci): + # appending with different categories or reordered is not ok + msg = "all inputs must be Index" + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.set_categories(list("abcd"))) + with pytest.raises(TypeError, match=msg): + ci.append(ci.values.reorder_categories(list("abc"))) + + def test_append_category_objects(self, ci): + # with objects + result = ci.append(Index(["c", "a"])) + expected = CategoricalIndex(list("aabbcaca"), categories=ci.categories) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_non_categories(self, ci): + # invalid objects -> cast to object via concat_compat + result = ci.append(Index(["a", "d"])) + expected = Index(["a", "a", "b", "b", "c", "a", "a", "d"]) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_object(self, ci): + # GH#14298 - if base object is not categorical -> coerce to object + result = Index(["c", "a"]).append(ci) + expected = Index(list("caaabbca")) + tm.assert_index_equal(result, expected, exact=True) + + def test_append_to_another(self): + # hits Index._concat + fst = Index(["a", "b"]) + snd = CategoricalIndex(["d", "e"]) + result = fst.append(snd) + expected = Index(["a", "b", "d", "e"]) + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_astype.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..a17627b7515b26b1fcfdca0feec376f03a018e83 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_astype.py @@ -0,0 +1,90 @@ +from datetime import date + +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, + IntervalIndex, +) +import pandas._testing as tm + + +class TestAstype: + def test_astype(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + result = ci.astype(object) + tm.assert_index_equal(result, Index(np.array(ci), dtype=object)) + + # this IS equal, but not the same class + assert result.equals(ci) + assert isinstance(result, Index) + assert not isinstance(result, CategoricalIndex) + + # interval + ii = IntervalIndex.from_arrays(left=[-0.001, 2.0], right=[2, 4], closed="right") + + ci = CategoricalIndex( + Categorical.from_codes([0, 1, -1], categories=ii, ordered=True) + ) + + result = ci.astype("interval") + expected = ii.take([0, 1, -1], allow_fill=True, fill_value=np.nan) + tm.assert_index_equal(result, expected) + + result = 
IntervalIndex(result.values) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("name", [None, "foo"]) + @pytest.mark.parametrize("dtype_ordered", [True, False]) + @pytest.mark.parametrize("index_ordered", [True, False]) + def test_astype_category(self, name, dtype_ordered, index_ordered): + # GH#18630 + index = CategoricalIndex( + list("aabbca"), categories=list("cab"), ordered=index_ordered + ) + if name: + index = index.rename(name) + + # standard categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = index.astype(dtype) + expected = CategoricalIndex( + index.tolist(), + name=name, + categories=index.categories, + ordered=dtype_ordered, + ) + tm.assert_index_equal(result, expected) + + # non-standard categories + dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered) + result = index.astype(dtype) + expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype) + tm.assert_index_equal(result, expected) + + if dtype_ordered is False: + # dtype='category' can't specify ordered, so only test once + result = index.astype("category") + expected = index + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("box", [True, False]) + def test_categorical_date_roundtrip(self, box): + # astype to categorical and back should preserve date objects + v = date.today() + + obj = Index([v, v]) + assert obj.dtype == object + if box: + obj = obj.array + + cat = obj.astype("category") + + rtrip = cat.astype(object) + assert rtrip.dtype == object + assert type(rtrip[0]) is date diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_category.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_category.py new file mode 100644 index 0000000000000000000000000000000000000000..03a298a13dc2b45b3e78ec2d6390741709e42590 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_category.py @@ -0,0 +1,394 @@ +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import index as libindex +from pandas._libs.arrays import NDArrayBacked + +import pandas as pd +from pandas import ( + Categorical, + CategoricalDtype, +) +import pandas._testing as tm +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, +) + + +class TestCategoricalIndex: + @pytest.fixture + def simple_index(self) -> CategoricalIndex: + return CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + def test_can_hold_identifiers(self): + idx = CategoricalIndex(list("aabbca"), categories=None, ordered=False) + key = idx[0] + assert idx._can_hold_identifiers_and_holds_name(key) is True + + def test_insert(self, simple_index): + ci = simple_index + categories = ci.categories + + # test 0th element + result = ci.insert(0, "a") + expected = CategoricalIndex(list("aaabbca"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # test Nth element that follows Python list behavior + result = ci.insert(-1, "a") + expected = CategoricalIndex(list("aabbcaa"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # test empty + result = CategoricalIndex([], categories=categories).insert(0, "a") + expected = CategoricalIndex(["a"], categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + # invalid -> cast to object + expected = ci.astype(object).insert(0, "d") + result = ci.insert(0, "d").astype(object) + tm.assert_index_equal(result, expected, 
exact=True) + + # GH 18295 (test missing) + expected = CategoricalIndex(["a", np.nan, "a", "b", "c", "b"]) + for na in (np.nan, pd.NaT, None): + result = CategoricalIndex(list("aabcb")).insert(1, na) + tm.assert_index_equal(result, expected) + + def test_insert_na_mismatched_dtype(self): + ci = CategoricalIndex([0, 1, 1]) + result = ci.insert(0, pd.NaT) + expected = Index([pd.NaT, 0, 1, 1], dtype=object) + tm.assert_index_equal(result, expected) + + def test_delete(self, simple_index): + ci = simple_index + categories = ci.categories + + result = ci.delete(0) + expected = CategoricalIndex(list("abbca"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + result = ci.delete(-1) + expected = CategoricalIndex(list("aabbc"), categories=categories) + tm.assert_index_equal(result, expected, exact=True) + + with tm.external_error_raised((IndexError, ValueError)): + # Either depending on NumPy version + ci.delete(10) + + @pytest.mark.parametrize( + "data, non_lexsorted_data", + [[[1, 2, 3], [9, 0, 1, 2, 3]], [list("abc"), list("fabcd")]], + ) + def test_is_monotonic(self, data, non_lexsorted_data): + c = CategoricalIndex(data) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(data, ordered=True) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(data, categories=reversed(data)) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is True + + c = CategoricalIndex(data, categories=reversed(data), ordered=True) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is True + + # test when data is neither monotonic increasing nor decreasing + reordered_data = [data[0], data[2], data[1]] + c = CategoricalIndex(reordered_data, categories=reversed(data)) + assert c.is_monotonic_increasing is False + assert c.is_monotonic_decreasing is False + + # non lexsorted categories + categories = non_lexsorted_data + + c = CategoricalIndex(categories[:2], categories=categories) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + c = CategoricalIndex(categories[1:3], categories=categories) + assert c.is_monotonic_increasing is True + assert c.is_monotonic_decreasing is False + + def test_has_duplicates(self): + idx = CategoricalIndex([0, 0, 0], name="foo") + assert idx.is_unique is False + assert idx.has_duplicates is True + + idx = CategoricalIndex([0, 1], categories=[2, 3], name="foo") + assert idx.is_unique is False + assert idx.has_duplicates is True + + idx = CategoricalIndex([0, 1, 2, 3], categories=[1, 2, 3], name="foo") + assert idx.is_unique is True + assert idx.has_duplicates is False + + @pytest.mark.parametrize( + "data, categories, expected", + [ + ( + [1, 1, 1], + [1, 2, 3], + { + "first": np.array([False, True, True]), + "last": np.array([True, True, False]), + False: np.array([True, True, True]), + }, + ), + ( + [1, 1, 1], + list("abc"), + { + "first": np.array([False, True, True]), + "last": np.array([True, True, False]), + False: np.array([True, True, True]), + }, + ), + ( + [2, "a", "b"], + list("abc"), + { + "first": np.zeros(shape=(3), dtype=np.bool_), + "last": np.zeros(shape=(3), dtype=np.bool_), + False: np.zeros(shape=(3), dtype=np.bool_), + }, + ), + ( + list("abb"), + list("abc"), + { + "first": np.array([False, False, True]), + "last": np.array([False, True, False]), + False: np.array([False, True, True]), + }, + ), + ], + ) + def 
test_drop_duplicates(self, data, categories, expected): + idx = CategoricalIndex(data, categories=categories, name="foo") + for keep, e in expected.items(): + tm.assert_numpy_array_equal(idx.duplicated(keep=keep), e) + e = idx[~e] + result = idx.drop_duplicates(keep=keep) + tm.assert_index_equal(result, e) + + @pytest.mark.parametrize( + "data, categories, expected_data", + [ + ([1, 1, 1], [1, 2, 3], [1]), + ([1, 1, 1], list("abc"), [np.nan]), + ([1, 2, "a"], [1, 2, 3], [1, 2, np.nan]), + ([2, "a", "b"], list("abc"), [np.nan, "a", "b"]), + ], + ) + def test_unique(self, data, categories, expected_data, ordered): + dtype = CategoricalDtype(categories, ordered=ordered) + + idx = CategoricalIndex(data, dtype=dtype) + expected = CategoricalIndex(expected_data, dtype=dtype) + tm.assert_index_equal(idx.unique(), expected) + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="repr doesn't roundtrip") + def test_repr_roundtrip(self): + ci = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + str(ci) + tm.assert_index_equal(eval(repr(ci)), ci, exact=True) + + # formatting + str(ci) + + # long format + # this is not reprable + ci = CategoricalIndex(np.random.default_rng(2).integers(0, 5, size=100)) + str(ci) + + def test_isin(self): + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + tm.assert_numpy_array_equal( + ci.isin(["c"]), np.array([False, False, False, True, False, False]) + ) + tm.assert_numpy_array_equal( + ci.isin(["c", "a", "b"]), np.array([True] * 5 + [False]) + ) + tm.assert_numpy_array_equal( + ci.isin(["c", "a", "b", np.nan]), np.array([True] * 6) + ) + + # mismatched categorical -> coerced to ndarray so doesn't matter + result = ci.isin(ci.set_categories(list("abcdefghi"))) + expected = np.array([True] * 6) + tm.assert_numpy_array_equal(result, expected) + + result = ci.isin(ci.set_categories(list("defghi"))) + expected = np.array([False] * 5 + [True]) + tm.assert_numpy_array_equal(result, expected) + + def test_isin_overlapping_intervals(self): + # GH 34974 + idx = pd.IntervalIndex([pd.Interval(0, 2), pd.Interval(0, 1)]) + result = CategoricalIndex(idx).isin(idx) + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + def test_identical(self): + ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) + assert ci1.identical(ci1) + assert ci1.identical(ci1.copy()) + assert not ci1.identical(ci2) + + def test_ensure_copied_data(self): + # gh-12309: Check the "copy" argument of each + # Index.__new__ is honored. + # + # Must be tested separately from other indexes because + # self.values is not an ndarray. 
+ index = CategoricalIndex(list("ab") * 5) + + result = CategoricalIndex(index.values, copy=True) + tm.assert_index_equal(index, result) + assert not np.shares_memory(result._data._codes, index._data._codes) + + result = CategoricalIndex(index.values, copy=False) + assert result._data._codes is index._data._codes + + +class TestCategoricalIndex2: + def test_view_i8(self): + # GH#25464 + ci = CategoricalIndex(list("ab") * 50) + msg = "When changing to a larger dtype, its size must be a divisor" + with pytest.raises(ValueError, match=msg): + ci.view("i8") + with pytest.raises(ValueError, match=msg): + ci._data.view("i8") + + ci = ci[:-4] # length divisible by 8 + + res = ci.view("i8") + expected = ci._data.codes.view("i8") + tm.assert_numpy_array_equal(res, expected) + + cat = ci._data + tm.assert_numpy_array_equal(cat.view("i8"), expected) + + @pytest.mark.parametrize( + "dtype, engine_type", + [ + (np.int8, libindex.Int8Engine), + (np.int16, libindex.Int16Engine), + (np.int32, libindex.Int32Engine), + (np.int64, libindex.Int64Engine), + ], + ) + def test_engine_type(self, dtype, engine_type): + if dtype != np.int64: + # num. of uniques required to push CategoricalIndex.codes to a + # dtype (128 categories required for .codes dtype to be int16 etc.) + num_uniques = {np.int8: 1, np.int16: 128, np.int32: 32768}[dtype] + ci = CategoricalIndex(range(num_uniques)) + else: + # having 2**32 - 2**31 categories would be very memory-intensive, + # so we cheat a bit with the dtype + ci = CategoricalIndex(range(32768)) # == 2**16 - 2**(16 - 1) + arr = ci.values._ndarray.astype("int64") + NDArrayBacked.__init__(ci._data, arr, ci.dtype) + assert np.issubdtype(ci.codes.dtype, dtype) + assert isinstance(ci._engine, engine_type) + + @pytest.mark.parametrize( + "func,op_name", + [ + (lambda idx: idx - idx, "__sub__"), + (lambda idx: idx + idx, "__add__"), + (lambda idx: idx - ["a", "b"], "__sub__"), + (lambda idx: idx + ["a", "b"], "__add__"), + (lambda idx: ["a", "b"] - idx, "__rsub__"), + (lambda idx: ["a", "b"] + idx, "__radd__"), + ], + ) + def test_disallow_addsub_ops(self, func, op_name): + # GH 10039 + # set ops (+/-) raise TypeError + idx = Index(Categorical(["a", "b"])) + cat_or_list = "'(Categorical|list)' and '(Categorical|list)'" + msg = "|".join( + [ + f"cannot perform {op_name} with this index type: CategoricalIndex", + "can only concatenate list", + rf"unsupported operand type\(s\) for [\+-]: {cat_or_list}", + ] + ) + with pytest.raises(TypeError, match=msg): + func(idx) + + def test_method_delegation(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.set_categories(list("cab")) + tm.assert_index_equal( + result, CategoricalIndex(list("aabbca"), categories=list("cab")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.rename_categories(list("efg")) + tm.assert_index_equal( + result, CategoricalIndex(list("ffggef"), categories=list("efg")) + ) + + # GH18862 (let rename_categories take callables) + result = ci.rename_categories(lambda x: x.upper()) + tm.assert_index_equal( + result, CategoricalIndex(list("AABBCA"), categories=list("CAB")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.add_categories(["d"]) + tm.assert_index_equal( + result, CategoricalIndex(list("aabbca"), categories=list("cabd")) + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + result = ci.remove_categories(["c"]) + tm.assert_index_equal( + result, + CategoricalIndex(list("aabb") + [np.nan] + ["a"], 
categories=list("ab")), + ) + + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.as_unordered() + tm.assert_index_equal(result, ci) + + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef")) + result = ci.as_ordered() + tm.assert_index_equal( + result, + CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=True), + ) + + # invalid + msg = "cannot use inplace with CategoricalIndex" + with pytest.raises(ValueError, match=msg): + ci.set_categories(list("cab"), inplace=True) + + def test_remove_maintains_order(self): + ci = CategoricalIndex(list("abcdda"), categories=list("abcd")) + result = ci.reorder_categories(["d", "c", "b", "a"], ordered=True) + tm.assert_index_equal( + result, + CategoricalIndex(list("abcdda"), categories=list("dcba"), ordered=True), + ) + result = result.remove_categories(["c"]) + tm.assert_index_equal( + result, + CategoricalIndex( + ["a", "b", np.nan, "d", "d", "a"], categories=list("dba"), ordered=True + ), + ) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_constructors.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..f0c5307fc5c641ff25d26bd2bd8a158b43dd6a6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_constructors.py @@ -0,0 +1,142 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +class TestCategoricalIndexConstructors: + def test_construction_disallows_scalar(self): + msg = "must be called with a collection of some kind" + with pytest.raises(TypeError, match=msg): + CategoricalIndex(data=1, categories=list("abcd"), ordered=False) + with pytest.raises(TypeError, match=msg): + CategoricalIndex(categories=list("abcd"), ordered=False) + + def test_construction(self): + ci = CategoricalIndex(list("aabbca"), categories=list("abcd"), ordered=False) + categories = ci.categories + + result = Index(ci) + tm.assert_index_equal(result, ci, exact=True) + assert not result.ordered + + result = Index(ci.values) + tm.assert_index_equal(result, ci, exact=True) + assert not result.ordered + + # empty + result = CategoricalIndex([], categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal(result.codes, np.array([], dtype="int8")) + assert not result.ordered + + # passing categories + result = CategoricalIndex(list("aabbca"), categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + + c = Categorical(list("aabbca")) + result = CategoricalIndex(c) + tm.assert_index_equal(result.categories, Index(list("abc"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(c, categories=categories) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + ci = CategoricalIndex(c, categories=list("abcd")) + result = CategoricalIndex(ci) + tm.assert_index_equal(result.categories, Index(categories)) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8") + ) + assert not result.ordered + + result = 
CategoricalIndex(ci, categories=list("ab")) + tm.assert_index_equal(result.categories, Index(list("ab"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8") + ) + assert not result.ordered + + result = CategoricalIndex(ci, categories=list("ab"), ordered=True) + tm.assert_index_equal(result.categories, Index(list("ab"))) + tm.assert_numpy_array_equal( + result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8") + ) + assert result.ordered + + result = CategoricalIndex(ci, categories=list("ab"), ordered=True) + expected = CategoricalIndex( + ci, categories=list("ab"), ordered=True, dtype="category" + ) + tm.assert_index_equal(result, expected, exact=True) + + # turn me to an Index + result = Index(np.array(ci)) + assert isinstance(result, Index) + assert not isinstance(result, CategoricalIndex) + + def test_construction_with_dtype(self): + # specify dtype + ci = CategoricalIndex(list("aabbca"), categories=list("abc"), ordered=False) + + result = Index(np.array(ci), dtype="category") + tm.assert_index_equal(result, ci, exact=True) + + result = Index(np.array(ci).tolist(), dtype="category") + tm.assert_index_equal(result, ci, exact=True) + + # these are generally only equal when the categories are reordered + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + + result = Index(np.array(ci), dtype="category").reorder_categories(ci.categories) + tm.assert_index_equal(result, ci, exact=True) + + # make sure indexes are handled + idx = Index(range(3)) + expected = CategoricalIndex([0, 1, 2], categories=idx, ordered=True) + result = CategoricalIndex(idx, categories=idx, ordered=True) + tm.assert_index_equal(result, expected, exact=True) + + def test_construction_empty_with_bool_categories(self): + # see GH#22702 + cat = CategoricalIndex([], categories=[True, False]) + categories = sorted(cat.categories.tolist()) + assert categories == [False, True] + + def test_construction_with_categorical_dtype(self): + # construction with CategoricalDtype + # GH#18109 + data, cats, ordered = "a a b b".split(), "c b a".split(), True + dtype = CategoricalDtype(categories=cats, ordered=ordered) + + result = CategoricalIndex(data, dtype=dtype) + expected = CategoricalIndex(data, categories=cats, ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # GH#19032 + result = Index(data, dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + # error when combining categories/ordered and dtype kwargs + msg = "Cannot specify `categories` or `ordered` together with `dtype`." 
+ with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, categories=cats, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, ordered=ordered, dtype=dtype) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_equals.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_equals.py new file mode 100644 index 0000000000000000000000000000000000000000..a8353f301a3c39a50b2a0c5541722551ff660e30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_equals.py @@ -0,0 +1,96 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + MultiIndex, +) + + +class TestEquals: + def test_equals_categorical(self): + ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True) + ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True) + + assert ci1.equals(ci1) + assert not ci1.equals(ci2) + assert ci1.equals(ci1.astype(object)) + assert ci1.astype(object).equals(ci1) + + assert (ci1 == ci1).all() + assert not (ci1 != ci1).all() + assert not (ci1 > ci1).all() + assert not (ci1 < ci1).all() + assert (ci1 <= ci1).all() + assert (ci1 >= ci1).all() + + assert not (ci1 == 1).all() + assert (ci1 == Index(["a", "b"])).all() + assert (ci1 == ci1.values).all() + + # invalid comparisons + with pytest.raises(ValueError, match="Lengths must match"): + ci1 == Index(["a", "b", "c"]) + + msg = "Categoricals can only be compared if 'categories' are the same" + with pytest.raises(TypeError, match=msg): + ci1 == ci2 + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, ordered=False) + with pytest.raises(TypeError, match=msg): + ci1 == Categorical(ci1.values, categories=list("abc")) + + # tests + # make sure that we are testing for category inclusion properly + ci = CategoricalIndex(list("aabca"), categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + # Same categories, but different order + # Unordered + assert ci.equals(CategoricalIndex(list("aabca"))) + # Ordered + assert not ci.equals(CategoricalIndex(list("aabca"), ordered=True)) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca")) + assert not ci.equals(CategoricalIndex(list("aabca"))) + assert ci.equals(ci.copy()) + + ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"]) + assert not ci.equals(list("aabca") + [np.nan]) + assert ci.equals(CategoricalIndex(list("aabca") + [np.nan])) + assert not ci.equals(CategoricalIndex(list("aabca") + [np.nan], ordered=True)) + assert ci.equals(ci.copy()) + + def test_equals_categorical_unordered(self): + # https://github.com/pandas-dev/pandas/issues/16603 + a = CategoricalIndex(["A"], categories=["A", "B"]) + b = CategoricalIndex(["A"], categories=["B", "A"]) + c = CategoricalIndex(["C"], categories=["B", "A"]) + assert a.equals(b) + assert not a.equals(c) + assert not b.equals(c) + + def test_equals_non_category(self): + # GH#37667 Case where other contains a value not among ci's + # categories ("D") and also contains np.nan + ci = CategoricalIndex(["A", "B", np.nan, np.nan]) + other = Index(["A", "B", "D", np.nan]) + + assert not ci.equals(other) + + def test_equals_multiindex(self): + # dont raise NotImplementedError when calling is_dtype_compat + + mi = MultiIndex.from_arrays([["A", "B", "C", "D"], range(4)]) + ci = mi.to_flat_index().astype("category") + + assert 
not ci.equals(mi) + + def test_equals_string_dtype(self, any_string_dtype): + # GH#55364 + idx = CategoricalIndex(list("abc"), name="B") + other = Index(["a", "b", "c"], name="B", dtype=any_string_dtype) + assert idx.equals(other) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_fillna.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_fillna.py new file mode 100644 index 0000000000000000000000000000000000000000..09de578f3c649e5a90278f11b1e3cd5b1d0646d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_fillna.py @@ -0,0 +1,54 @@ +import numpy as np +import pytest + +from pandas import CategoricalIndex +import pandas._testing as tm + + +class TestFillNA: + def test_fillna_categorical(self): + # GH#11343 + idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name="x") + # fill by value in categories + exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name="x") + tm.assert_index_equal(idx.fillna(1.0), exp) + + cat = idx._data + + # fill by value not in categories raises TypeError on EA, casts on CI + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + cat.fillna(2.0) + + result = idx.fillna(2.0) + expected = idx.astype(object).fillna(2.0) + tm.assert_index_equal(result, expected) + + def test_fillna_copies_with_no_nas(self): + # Nothing to fill, should still get a copy for the Categorical method, + # but OK to get a view on CategoricalIndex method + ci = CategoricalIndex([0, 1, 1]) + result = ci.fillna(0) + assert result is not ci + assert tm.shares_memory(result, ci) + + # But at the EA level we always get a copy. + cat = ci._data + result = cat.fillna(0) + assert result._ndarray is not cat._ndarray + assert result._ndarray.base is None + assert not tm.shares_memory(result, cat) + + def test_fillna_validates_with_no_nas(self): + # We validate the fill value even if fillna is a no-op + ci = CategoricalIndex([2, 3, 3]) + cat = ci._data + + msg = "Cannot setitem on a Categorical with a new category" + res = ci.fillna(False) + # nothing to fill, so we dont cast + tm.assert_index_equal(res, ci) + + # Same check directly on the Categorical + with pytest.raises(TypeError, match=msg): + cat.fillna(False) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_formats.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..522ca1bc2afde451e2d6feff780a973aceb4c39f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_formats.py @@ -0,0 +1,120 @@ +""" +Tests for CategoricalIndex.__repr__ and related methods. 
+""" +import pytest + +from pandas._config import using_pyarrow_string_dtype +import pandas._config.config as cf + +from pandas import CategoricalIndex +import pandas._testing as tm + + +class TestCategoricalIndexRepr: + def test_format_different_scalar_lengths(self): + # GH#35439 + idx = CategoricalIndex(["aaaaaaaaa", "b"]) + expected = ["aaaaaaaaa", "b"] + msg = r"CategoricalIndex\.format is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert idx.format() == expected + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="repr different") + def test_string_categorical_index_repr(self): + # short + idx = CategoricalIndex(["a", "bb", "ccc"]) + expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["a", "bb", "ccc"] * 10) + expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', + 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["a", "bb", "ccc"] * 100) + expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', + ... + 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'], + categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("abcdefghijklmmo")) + expected = """CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', + 'm', 'm', 'o'], + categories=['a', 'b', 'c', 'd', ..., 'k', 'l', 'm', 'o'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # short + idx = CategoricalIndex(["あ", "いい", "ううう"]) + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["あ", "いい", "ううう"] * 10) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["あ", "いい", "ううう"] * 100) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', + ... 
+ 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) + expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', + 'す', 'せ', 'そ'], + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # Enable Unicode option ----------------------------------------- + with cf.option_context("display.unicode.east_asian_width", True): + # short + idx = CategoricalIndex(["あ", "いい", "ううう"]) + expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == expected + + # multiple lines + idx = CategoricalIndex(["あ", "いい", "ううう"] * 10) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', + 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected + + # truncated + idx = CategoricalIndex(["あ", "いい", "ううう"] * 100) + expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', + 'ううう', 'あ', + ... + 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', + 'あ', 'いい', 'ううう'], + categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501 + + assert repr(idx) == expected + + # larger categories + idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ")) + expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', + 'さ', 'し', 'す', 'せ', 'そ'], + categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(idx) == expected diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_indexing.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..49eb79da616e7603b70ee3189e9004dd51fb33e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_indexing.py @@ -0,0 +1,420 @@ +import numpy as np +import pytest + +from pandas.errors import InvalidIndexError + +import pandas as pd +from pandas import ( + CategoricalIndex, + Index, + IntervalIndex, + Timestamp, +) +import pandas._testing as tm + + +class TestTake: + def test_take_fill_value(self): + # GH 12631 + + # numeric category + idx = CategoricalIndex([1, 2, 3], name="xxx") + result = idx.take(np.array([1, 0, -1])) + expected = CategoricalIndex([2, 1, 3], name="xxx") + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name="xxx") + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = CategoricalIndex([2, 1, 3], name="xxx") + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # object category + idx = CategoricalIndex( + list("CBA"), categories=list("ABC"), 
ordered=True, name="xxx" + ) + result = idx.take(np.array([1, 0, -1])) + expected = CategoricalIndex( + list("BCA"), categories=list("ABC"), ordered=True, name="xxx" + ) + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = CategoricalIndex( + ["B", "C", np.nan], categories=list("ABC"), ordered=True, name="xxx" + ) + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = CategoricalIndex( + list("BCA"), categories=list("ABC"), ordered=True, name="xxx" + ) + tm.assert_index_equal(result, expected) + tm.assert_categorical_equal(result.values, expected.values) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + def test_take_fill_value_datetime(self): + # datetime category + idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx") + idx = CategoricalIndex(idx) + result = idx.take(np.array([1, 0, -1])) + expected = pd.DatetimeIndex( + ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx" + ) + expected = CategoricalIndex(expected) + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx") + exp_cats = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"]) + expected = CategoricalIndex(expected, categories=exp_cats) + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = pd.DatetimeIndex( + ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx" + ) + expected = CategoricalIndex(expected) + tm.assert_index_equal(result, expected) + + msg = ( + "When allow_fill=True and fill_value is not None, " + "all indices must be >= -1" + ) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with pytest.raises(ValueError, match=msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + msg = "index -5 is out of bounds for (axis 0 with )?size 3" + with pytest.raises(IndexError, match=msg): + idx.take(np.array([1, -5])) + + def test_take_invalid_kwargs(self): + idx = CategoricalIndex([1, 2, 3], name="foo") + indices = [1, 0, -1] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + idx.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + idx.take(indices, mode="clip") + + +class TestGetLoc: + def test_get_loc(self): + # GH 12531 + cidx1 = CategoricalIndex(list("abcde"), categories=list("edabc")) + idx1 = Index(list("abcde")) + assert cidx1.get_loc("a") == idx1.get_loc("a") + assert cidx1.get_loc("e") == idx1.get_loc("e") + + for i in [cidx1, idx1]: + with pytest.raises(KeyError, 
match="'NOT-EXIST'"): + i.get_loc("NOT-EXIST") + + # non-unique + cidx2 = CategoricalIndex(list("aacded"), categories=list("edabc")) + idx2 = Index(list("aacded")) + + # results in bool array + res = cidx2.get_loc("d") + tm.assert_numpy_array_equal(res, idx2.get_loc("d")) + tm.assert_numpy_array_equal( + res, np.array([False, False, False, True, False, True]) + ) + # unique element results in scalar + res = cidx2.get_loc("e") + assert res == idx2.get_loc("e") + assert res == 4 + + for i in [cidx2, idx2]: + with pytest.raises(KeyError, match="'NOT-EXIST'"): + i.get_loc("NOT-EXIST") + + # non-unique, sliceable + cidx3 = CategoricalIndex(list("aabbb"), categories=list("abc")) + idx3 = Index(list("aabbb")) + + # results in slice + res = cidx3.get_loc("a") + assert res == idx3.get_loc("a") + assert res == slice(0, 2, None) + + res = cidx3.get_loc("b") + assert res == idx3.get_loc("b") + assert res == slice(2, 5, None) + + for i in [cidx3, idx3]: + with pytest.raises(KeyError, match="'c'"): + i.get_loc("c") + + def test_get_loc_unique(self): + cidx = CategoricalIndex(list("abc")) + result = cidx.get_loc("b") + assert result == 1 + + def test_get_loc_monotonic_nonunique(self): + cidx = CategoricalIndex(list("abbc")) + result = cidx.get_loc("b") + expected = slice(1, 3, None) + assert result == expected + + def test_get_loc_nonmonotonic_nonunique(self): + cidx = CategoricalIndex(list("abcb")) + result = cidx.get_loc("b") + expected = np.array([False, True, False, True], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + def test_get_loc_nan(self): + # GH#41933 + ci = CategoricalIndex(["A", "B", np.nan]) + res = ci.get_loc(np.nan) + + assert res == 2 + + +class TestGetIndexer: + def test_get_indexer_base(self): + # Determined by cat ordering. + idx = CategoricalIndex(list("cab"), categories=list("cab")) + expected = np.arange(len(idx), dtype=np.intp) + + actual = idx.get_indexer(idx) + tm.assert_numpy_array_equal(expected, actual) + + with pytest.raises(ValueError, match="Invalid fill method"): + idx.get_indexer(idx, method="invalid") + + def test_get_indexer_requires_unique(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + oidx = Index(np.array(ci)) + + msg = "Reindexing only valid with uniquely valued Index objects" + + for n in [1, 2, 5, len(ci)]: + finder = oidx[np.random.default_rng(2).integers(0, len(ci), size=n)] + + with pytest.raises(InvalidIndexError, match=msg): + ci.get_indexer(finder) + + # see gh-17323 + # + # Even when indexer is equal to the + # members in the index, we should + # respect duplicates instead of taking + # the fast-track path. 
+ for finder in [list("aabbca"), list("aababca")]: + with pytest.raises(InvalidIndexError, match=msg): + ci.get_indexer(finder) + + def test_get_indexer_non_unique(self): + idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc")) + idx2 = CategoricalIndex(list("abf")) + + for indexer in [idx2, list("abf"), Index(list("abf"))]: + msg = "Reindexing only valid with uniquely valued Index objects" + with pytest.raises(InvalidIndexError, match=msg): + idx1.get_indexer(indexer) + + r1, _ = idx1.get_indexer_non_unique(indexer) + expected = np.array([0, 1, 2, -1], dtype=np.intp) + tm.assert_almost_equal(r1, expected) + + def test_get_indexer_method(self): + idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc")) + idx2 = CategoricalIndex(list("abf")) + + msg = "method pad not yet implemented for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method="pad") + msg = "method backfill not yet implemented for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method="backfill") + + msg = "method nearest not yet implemented for CategoricalIndex" + with pytest.raises(NotImplementedError, match=msg): + idx2.get_indexer(idx1, method="nearest") + + def test_get_indexer_array(self): + arr = np.array( + [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")], + dtype=object, + ) + cats = [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")] + ci = CategoricalIndex(cats, categories=cats, ordered=False, dtype="category") + result = ci.get_indexer(arr) + expected = np.array([0, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_same_categories_same_order(self): + ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) + + result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["a", "b"])) + expected = np.array([1, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19551 + ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) + + result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["b", "a"])) + expected = np.array([1, 1], dtype="intp") + tm.assert_numpy_array_equal(result, expected) + + def test_get_indexer_nans_in_index_and_target(self): + # GH 45361 + ci = CategoricalIndex([1, 2, np.nan, 3]) + other1 = [2, 3, 4, np.nan] + res1 = ci.get_indexer(other1) + expected1 = np.array([1, 3, -1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(res1, expected1) + other2 = [1, 4, 2, 3] + res2 = ci.get_indexer(other2) + expected2 = np.array([0, -1, 1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(res2, expected2) + + +class TestWhere: + def test_where(self, listlike_box): + klass = listlike_box + + i = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False) + cond = [True] * len(i) + expected = i + result = i.where(klass(cond)) + tm.assert_index_equal(result, expected) + + cond = [False] + [True] * (len(i) - 1) + expected = CategoricalIndex([np.nan] + i[1:].tolist(), categories=i.categories) + result = i.where(klass(cond)) + tm.assert_index_equal(result, expected) + + def test_where_non_categories(self): + ci = CategoricalIndex(["a", "b", "c", "d"]) + mask = np.array([True, False, True, False]) + + result = ci.where(mask, 2) + expected = Index(["a", 2, "c", 2], dtype=object) + tm.assert_index_equal(result, expected) + + msg = "Cannot setitem on a Categorical with a new category" 
+ with pytest.raises(TypeError, match=msg): + # Test the Categorical method directly + ci._data._where(mask, 2) + + +class TestContains: + def test_contains(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=False) + + assert "a" in ci + assert "z" not in ci + assert "e" not in ci + assert np.nan not in ci + + # assert codes NOT in index + assert 0 not in ci + assert 1 not in ci + + def test_contains_nan(self): + ci = CategoricalIndex(list("aabbca") + [np.nan], categories=list("cabdef")) + assert np.nan in ci + + @pytest.mark.parametrize("unwrap", [True, False]) + def test_contains_na_dtype(self, unwrap): + dti = pd.date_range("2016-01-01", periods=100).insert(0, pd.NaT) + pi = dti.to_period("D") + tdi = dti - dti[-1] + ci = CategoricalIndex(dti) + + obj = ci + if unwrap: + obj = ci._data + + assert np.nan in obj + assert None in obj + assert pd.NaT in obj + assert np.datetime64("NaT") in obj + assert np.timedelta64("NaT") not in obj + + obj2 = CategoricalIndex(tdi) + if unwrap: + obj2 = obj2._data + + assert np.nan in obj2 + assert None in obj2 + assert pd.NaT in obj2 + assert np.datetime64("NaT") not in obj2 + assert np.timedelta64("NaT") in obj2 + + obj3 = CategoricalIndex(pi) + if unwrap: + obj3 = obj3._data + + assert np.nan in obj3 + assert None in obj3 + assert pd.NaT in obj3 + assert np.datetime64("NaT") not in obj3 + assert np.timedelta64("NaT") not in obj3 + + @pytest.mark.parametrize( + "item, expected", + [ + (pd.Interval(0, 1), True), + (1.5, True), + (pd.Interval(0.5, 1.5), False), + ("a", False), + (Timestamp(1), False), + (pd.Timedelta(1), False), + ], + ids=str, + ) + def test_contains_interval(self, item, expected): + # GH 23705 + ci = CategoricalIndex(IntervalIndex.from_breaks(range(3))) + result = item in ci + assert result is expected + + def test_contains_list(self): + # GH#21729 + idx = CategoricalIndex([1, 2, 3]) + + assert "a" not in idx + + with pytest.raises(TypeError, match="unhashable type"): + ["a"] in idx + + with pytest.raises(TypeError, match="unhashable type"): + ["a", "b"] in idx diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_map.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_map.py new file mode 100644 index 0000000000000000000000000000000000000000..baf836594dfb5e03332b57522f39a679ee5b1e40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_map.py @@ -0,0 +1,144 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + CategoricalIndex, + Index, + Series, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "data, categories", + [ + (list("abcbca"), list("cab")), + (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)), + ], + ids=["string", "interval"], +) +def test_map_str(data, categories, ordered): + # GH 31202 - override base class since we want to maintain categorical/ordered + index = CategoricalIndex(data, categories=categories, ordered=ordered) + result = index.map(str) + expected = CategoricalIndex( + map(str, data), categories=map(str, categories), ordered=ordered + ) + tm.assert_index_equal(result, expected) + + +def test_map(): + ci = CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True) + result = ci.map(lambda x: x.lower()) + exp = CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True) + tm.assert_index_equal(result, exp) + + ci = CategoricalIndex( + list("ABABC"), categories=list("BAC"), ordered=False, name="XXX" + ) + result 
= ci.map(lambda x: x.lower()) + exp = CategoricalIndex( + list("ababc"), categories=list("bac"), ordered=False, name="XXX" + ) + tm.assert_index_equal(result, exp) + + # GH 12766: Return an index not an array + tm.assert_index_equal( + ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX") + ) + + # change categories dtype + ci = CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False) + + def f(x): + return {"A": 10, "B": 20, "C": 30}.get(x) + + result = ci.map(f) + exp = CategoricalIndex([10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False) + tm.assert_index_equal(result, exp) + + result = ci.map(Series([10, 20, 30], index=["A", "B", "C"])) + tm.assert_index_equal(result, exp) + + result = ci.map({"A": 10, "B": 20, "C": 30}) + tm.assert_index_equal(result, exp) + + +def test_map_with_categorical_series(): + # GH 12756 + a = Index([1, 2, 3, 4]) + b = Series(["even", "odd", "even", "odd"], dtype="category") + c = Series(["even", "odd", "even", "odd"]) + + exp = CategoricalIndex(["odd", "even", "odd", np.nan]) + tm.assert_index_equal(a.map(b), exp) + exp = Index(["odd", "even", "odd", np.nan]) + tm.assert_index_equal(a.map(c), exp) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, CategoricalIndex([False, False, np.nan])), + ([1, 2, np.nan], pd.isna, Index([False, False, np.nan])), + ([1, 1, np.nan], {1: False}, CategoricalIndex([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + Series([False, False]), + CategoricalIndex([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False, False, False]), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_ignore(data, f, expected): # GH 24241 + values = CategoricalIndex(data) + result = values.map(f, na_action="ignore") + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, Index([False, False, True])), + ([1, 2, np.nan], pd.isna, Index([False, False, True])), + ([1, 1, np.nan], {1: False}, CategoricalIndex([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + Series([False, False]), + CategoricalIndex([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False, False, False]), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_none(data, f, expected): # GH 24241 + values = CategoricalIndex(data) + result = values.map(f, na_action=None) + tm.assert_index_equal(result, expected) + + +def test_map_with_dict_or_series(): + orig_values = ["a", "B", 1, "a"] + new_values = ["one", 2, 3.0, "one"] + cur_index = CategoricalIndex(orig_values, name="XXX") + expected = CategoricalIndex(new_values, name="XXX", categories=[3.0, 2, "one"]) + + mapper = Series(new_values[:-1], index=orig_values[:-1]) + result = cur_index.map(mapper) + # Order of categories in result can be different + tm.assert_index_equal(result, expected) + + mapper = dict(zip(orig_values[:-1], new_values[:-1])) + result = cur_index.map(mapper) + # Order of categories in result can be different + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_reindex.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_reindex.py new file mode 100644 index 0000000000000000000000000000000000000000..5b1f2b9fb159a6873c83e0a0a4e777913bb99fee --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_reindex.py @@ -0,0 +1,78 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + Interval, +) +import pandas._testing as tm + + +class TestReindex: + def test_reindex_list_non_unique(self): + # GH#11586 + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) + + def test_reindex_categorical_non_unique(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(Categorical(["a", "c"])) + + def test_reindex_list_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(["a", "c"]) + + def test_reindex_categorical_non_unique_unused_category(self): + msg = "cannot reindex on an axis with duplicate labels" + ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + with pytest.raises(ValueError, match=msg): + ci.reindex(Categorical(["a", "c"])) + + def test_reindex_duplicate_target(self): + # See GH25459 + cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"]) + res, indexer = cat.reindex(["a", "c", "c"]) + exp = Index(["a", "c", "c"]) + tm.assert_index_equal(res, exp, exact=True) + tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp)) + + res, indexer = cat.reindex( + CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"]) + ) + exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"]) + tm.assert_index_equal(res, exp, exact=True) + tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp)) + + def test_reindex_empty_index(self): + # See GH16770 + c = CategoricalIndex([]) + res, indexer = c.reindex(["a", "b"]) + tm.assert_index_equal(res, Index(["a", "b"]), exact=True) + tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp)) + + def test_reindex_categorical_added_category(self): + # GH 42424 + ci = CategoricalIndex( + [Interval(0, 1, closed="right"), Interval(1, 2, closed="right")], + ordered=True, + ) + ci_add = CategoricalIndex( + [ + Interval(0, 1, closed="right"), + Interval(1, 2, closed="right"), + Interval(2, 3, closed="right"), + Interval(3, 4, closed="right"), + ], + ordered=True, + ) + result, _ = ci.reindex(ci_add) + expected = ci_add + tm.assert_index_equal(expected, result) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_setops.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_setops.py new file mode 100644 index 0000000000000000000000000000000000000000..2e87b90efd54c8fcc4dcab7ec538d461add370de --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/categorical/test_setops.py @@ -0,0 +1,18 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalIndex, + Index, +) +import pandas._testing as tm + + +@pytest.mark.parametrize("na_value", [None, np.nan]) +def test_difference_with_na(na_value): + # GH 57318 + ci = CategoricalIndex(["a", "b", "c", None]) + other = Index(["c", na_value]) + result = ci.difference(other) + expected = CategoricalIndex(["a", "b"], categories=["a", "b", "c"]) + tm.assert_index_equal(result, expected) diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b776ac21bc5873f325295e27d0347a273bf4362 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__pycache__/test_astype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__pycache__/test_astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e6e678b415b68ab50296647428466ff283c5116 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__pycache__/test_astype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__pycache__/test_indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__pycache__/test_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19c40ddd7188b08759c81adf4cedf6d0c70fb070 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/__pycache__/test_indexing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/test_astype.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..9c1ef302c5b51cad92e2c0b1996d4d58d90e5ec6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/test_astype.py @@ -0,0 +1,33 @@ +import pytest + +from pandas import ( + Index, + NaT, + Series, +) +import pandas._testing as tm + + +def test_astype_str_from_bytes(): + # https://github.com/pandas-dev/pandas/issues/38607 + # GH#49658 pre-2.0 Index called .values.astype(str) here, which effectively + # did a .decode() on the bytes object. 
In 2.0 we go through + # ensure_string_array which does f"{val}" + idx = Index(["あ", b"a"], dtype="object") + result = idx.astype(str) + expected = Index(["あ", "a"], dtype="object") + tm.assert_index_equal(result, expected) + + # while we're here, check that Series.astype behaves the same + result = Series(idx).astype(str) + expected = Series(expected, dtype=object) + tm.assert_series_equal(result, expected) + + +def test_astype_invalid_nas_to_tdt64_raises(): + # GH#45722 don't cast np.datetime64 NaTs to timedelta64 NaT + idx = Index([NaT.asm8] * 2, dtype=object) + + msg = r"Invalid type for timedelta scalar: " + with pytest.raises(TypeError, match=msg): + idx.astype("m8[ns]") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/test_indexing.py b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..443cacf94d23923fd2fce28eec45f0626fe5745d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/indexes/object/test_indexing.py @@ -0,0 +1,243 @@ +from decimal import Decimal + +import numpy as np +import pytest + +from pandas._libs.missing import ( + NA, + is_matching_na, +) +from pandas.compat import pa_version_under16p0 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import Index +import pandas._testing as tm + + +class TestGetIndexer: + @pytest.mark.parametrize( + "method,expected", + [ + ("pad", np.array([-1, 0, 1, 1], dtype=np.intp)), + ("backfill", np.array([0, 0, 1, -1], dtype=np.intp)), + ], + ) + def test_get_indexer_strings(self, method, expected): + index = Index(["b", "c"]) + actual = index.get_indexer(["a", "b", "c", "d"], method=method) + + tm.assert_numpy_array_equal(actual, expected) + + def test_get_indexer_strings_raises(self, using_infer_string): + index = Index(["b", "c"]) + + if using_infer_string: + import pyarrow as pa + + msg = "has no kernel" + with pytest.raises(pa.lib.ArrowNotImplementedError, match=msg): + index.get_indexer(["a", "b", "c", "d"], method="nearest") + + with pytest.raises(pa.lib.ArrowNotImplementedError, match=msg): + index.get_indexer(["a", "b", "c", "d"], method="pad", tolerance=2) + + with pytest.raises(pa.lib.ArrowNotImplementedError, match=msg): + index.get_indexer( + ["a", "b", "c", "d"], method="pad", tolerance=[2, 2, 2, 2] + ) + + else: + msg = r"unsupported operand type\(s\) for -: 'str' and 'str'" + with pytest.raises(TypeError, match=msg): + index.get_indexer(["a", "b", "c", "d"], method="nearest") + + with pytest.raises(TypeError, match=msg): + index.get_indexer(["a", "b", "c", "d"], method="pad", tolerance=2) + + with pytest.raises(TypeError, match=msg): + index.get_indexer( + ["a", "b", "c", "d"], method="pad", tolerance=[2, 2, 2, 2] + ) + + def test_get_indexer_with_NA_values( + self, unique_nulls_fixture, unique_nulls_fixture2 + ): + # GH#22332 + # check pairwise, that no pair of na values + # is mangled + if unique_nulls_fixture is unique_nulls_fixture2: + return # skip it, values are not unique + arr = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object) + index = Index(arr, dtype=object) + result = index.get_indexer( + Index( + [unique_nulls_fixture, unique_nulls_fixture2, "Unknown"], dtype=object + ) + ) + expected = np.array([0, 1, -1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + +class TestGetIndexerNonUnique: + def test_get_indexer_non_unique_nas( + self, nulls_fixture, request, using_infer_string + ): + # even though this isn't 
non-unique, this should still work + if using_infer_string and (nulls_fixture is None or nulls_fixture is NA): + request.applymarker(pytest.mark.xfail(reason="NAs are cast to NaN")) + index = Index(["a", "b", nulls_fixture]) + indexer, missing = index.get_indexer_non_unique([nulls_fixture]) + + expected_indexer = np.array([2], dtype=np.intp) + expected_missing = np.array([], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + + # actually non-unique + index = Index(["a", nulls_fixture, "b", nulls_fixture]) + indexer, missing = index.get_indexer_non_unique([nulls_fixture]) + + expected_indexer = np.array([1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + + # matching-but-not-identical nans + if is_matching_na(nulls_fixture, float("NaN")): + index = Index(["a", float("NaN"), "b", float("NaN")]) + match_but_not_identical = True + elif is_matching_na(nulls_fixture, Decimal("NaN")): + index = Index(["a", Decimal("NaN"), "b", Decimal("NaN")]) + match_but_not_identical = True + else: + match_but_not_identical = False + + if match_but_not_identical: + indexer, missing = index.get_indexer_non_unique([nulls_fixture]) + + expected_indexer = np.array([1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + + @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning") + def test_get_indexer_non_unique_np_nats(self, np_nat_fixture, np_nat_fixture2): + expected_missing = np.array([], dtype=np.intp) + # matching-but-not-identical nats + if is_matching_na(np_nat_fixture, np_nat_fixture2): + # ensure nats are different objects + index = Index( + np.array( + ["2021-10-02", np_nat_fixture.copy(), np_nat_fixture2.copy()], + dtype=object, + ), + dtype=object, + ) + # pass as index to prevent target from being casted to DatetimeIndex + indexer, missing = index.get_indexer_non_unique( + Index([np_nat_fixture], dtype=object) + ) + expected_indexer = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + # dt64nat vs td64nat + else: + try: + np_nat_fixture == np_nat_fixture2 + except (TypeError, OverflowError): + # Numpy will raise on uncomparable types, like + # np.datetime64('NaT', 'Y') and np.datetime64('NaT', 'ps') + # https://github.com/numpy/numpy/issues/22762 + return + index = Index( + np.array( + [ + "2021-10-02", + np_nat_fixture, + np_nat_fixture2, + np_nat_fixture, + np_nat_fixture2, + ], + dtype=object, + ), + dtype=object, + ) + # pass as index to prevent target from being casted to DatetimeIndex + indexer, missing = index.get_indexer_non_unique( + Index([np_nat_fixture], dtype=object) + ) + expected_indexer = np.array([1, 3], dtype=np.intp) + tm.assert_numpy_array_equal(indexer, expected_indexer) + tm.assert_numpy_array_equal(missing, expected_missing) + + +class TestSliceLocs: + @pytest.mark.parametrize( + "dtype", + [ + "object", + pytest.param("string[pyarrow_numpy]", marks=td.skip_if_no("pyarrow")), + ], + ) + @pytest.mark.parametrize( + "in_slice,expected", + [ + # error: Slice index must be an integer or None + (pd.IndexSlice[::-1], "yxdcb"), + (pd.IndexSlice["b":"y":-1], ""), # type: ignore[misc] + (pd.IndexSlice["b"::-1], "b"), # type: ignore[misc] + (pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice[:"y":-1], "y"), # 
type: ignore[misc] + (pd.IndexSlice["y"::-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice["y"::-4], "yb"), # type: ignore[misc] + # absent labels + (pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice[:"a":-2], "ydb"), # type: ignore[misc] + (pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore[misc] + (pd.IndexSlice["z"::-3], "yc"), # type: ignore[misc] + (pd.IndexSlice["m"::-1], "dcb"), # type: ignore[misc] + (pd.IndexSlice[:"m":-1], "yx"), # type: ignore[misc] + (pd.IndexSlice["a":"a":-1], ""), # type: ignore[misc] + (pd.IndexSlice["z":"z":-1], ""), # type: ignore[misc] + (pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc] + ], + ) + def test_slice_locs_negative_step(self, in_slice, expected, dtype, request): + if ( + not pa_version_under16p0 + and dtype == "string[pyarrow_numpy]" + and in_slice == slice("a", "a", -1) + ): + request.applymarker( + pytest.mark.xfail(reason="https://github.com/apache/arrow/issues/40642") + ) + + index = Index(list("bcdxy"), dtype=dtype) + + s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop, in_slice.step) + result = index[s_start : s_stop : in_slice.step] + expected = Index(list(expected), dtype=dtype) + tm.assert_index_equal(result, expected) + + @td.skip_if_no("pyarrow") + def test_slice_locs_negative_step_oob(self): + index = Index(list("bcdxy"), dtype="string[pyarrow_numpy]") + + result = index[-10:5:1] + tm.assert_index_equal(result, index) + + result = index[4:-10:-1] + expected = Index(list("yxdcb"), dtype="string[pyarrow_numpy]") + tm.assert_index_equal(result, expected) + + def test_slice_locs_dup(self): + index = Index(["a", "a", "b", "c", "d", "d"]) + assert index.slice_locs("a", "d") == (0, 6) + assert index.slice_locs(end="d") == (0, 6) + assert index.slice_locs("a", "c") == (0, 4) + assert index.slice_locs("b", "d") == (2, 6) + + index2 = index[::-1] + assert index2.slice_locs("d", "a") == (0, 6) + assert index2.slice_locs(end="a") == (0, 6) + assert index2.slice_locs("d", "b") == (0, 4) + assert index2.slice_locs("c", "a") == (2, 6) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad10d7b20c210b068d73ac3f4ebc27f46cb03d00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a42726bee7fa4a04ecc532c0d7dad76eca20562 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_style.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e48ae4d31ada1d96969e7e264fe60901e400c62d Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_style.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/common.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/common.py new file mode 100644 index 0000000000000000000000000000000000000000..69120160699c24cc86670522f84ec6c7014c20ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/common.py @@ -0,0 +1,563 @@ +""" +Module consolidating common testing functions for checking plotting. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.api import is_list_like + +import pandas as pd +from pandas import Series +import pandas._testing as tm + +if TYPE_CHECKING: + from collections.abc import Sequence + + from matplotlib.axes import Axes + + +def _check_legend_labels(axes, labels=None, visible=True): + """ + Check each axes has expected legend labels + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + labels : list-like + expected legend labels + visible : bool + expected legend visibility. labels are checked only when visible is + True + """ + if visible and (labels is None): + raise ValueError("labels must be specified when visible is True") + axes = _flatten_visible(axes) + for ax in axes: + if visible: + assert ax.get_legend() is not None + _check_text_labels(ax.get_legend().get_texts(), labels) + else: + assert ax.get_legend() is None + + +def _check_legend_marker(ax, expected_markers=None, visible=True): + """ + Check ax has expected legend markers + + Parameters + ---------- + ax : matplotlib Axes object + expected_markers : list-like + expected legend markers + visible : bool + expected legend visibility. 
labels are checked only when visible is + True + """ + if visible and (expected_markers is None): + raise ValueError("Markers must be specified when visible is True") + if visible: + handles, _ = ax.get_legend_handles_labels() + markers = [handle.get_marker() for handle in handles] + assert markers == expected_markers + else: + assert ax.get_legend() is None + + +def _check_data(xp, rs): + """ + Check each axes has identical lines + + Parameters + ---------- + xp : matplotlib Axes object + rs : matplotlib Axes object + """ + import matplotlib.pyplot as plt + + xp_lines = xp.get_lines() + rs_lines = rs.get_lines() + + assert len(xp_lines) == len(rs_lines) + for xpl, rsl in zip(xp_lines, rs_lines): + xpdata = xpl.get_xydata() + rsdata = rsl.get_xydata() + tm.assert_almost_equal(xpdata, rsdata) + + plt.close("all") + + +def _check_visible(collections, visible=True): + """ + Check each artist is visible or not + + Parameters + ---------- + collections : matplotlib Artist or its list-like + target Artist or its list or collection + visible : bool + expected visibility + """ + from matplotlib.collections import Collection + + if not isinstance(collections, Collection) and not is_list_like(collections): + collections = [collections] + + for patch in collections: + assert patch.get_visible() == visible + + +def _check_patches_all_filled(axes: Axes | Sequence[Axes], filled: bool = True) -> None: + """ + Check for each artist whether it is filled or not + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + filled : bool + expected filling + """ + + axes = _flatten_visible(axes) + for ax in axes: + for patch in ax.patches: + assert patch.fill == filled + + +def _get_colors_mapped(series, colors): + unique = series.unique() + # unique and colors length can be differed + # depending on slice value + mapped = dict(zip(unique, colors)) + return [mapped[v] for v in series.values] + + +def _check_colors(collections, linecolors=None, facecolors=None, mapping=None): + """ + Check each artist has expected line colors and face colors + + Parameters + ---------- + collections : list-like + list or collection of target artist + linecolors : list-like which has the same length as collections + list of expected line colors + facecolors : list-like which has the same length as collections + list of expected face colors + mapping : Series + Series used for color grouping key + used for andrew_curves, parallel_coordinates, radviz test + """ + from matplotlib import colors + from matplotlib.collections import ( + Collection, + LineCollection, + PolyCollection, + ) + from matplotlib.lines import Line2D + + conv = colors.ColorConverter + if linecolors is not None: + if mapping is not None: + linecolors = _get_colors_mapped(mapping, linecolors) + linecolors = linecolors[: len(collections)] + + assert len(collections) == len(linecolors) + for patch, color in zip(collections, linecolors): + if isinstance(patch, Line2D): + result = patch.get_color() + # Line2D may contains string color expression + result = conv.to_rgba(result) + elif isinstance(patch, (PolyCollection, LineCollection)): + result = tuple(patch.get_edgecolor()[0]) + else: + result = patch.get_edgecolor() + + expected = conv.to_rgba(color) + assert result == expected + + if facecolors is not None: + if mapping is not None: + facecolors = _get_colors_mapped(mapping, facecolors) + facecolors = facecolors[: len(collections)] + + assert len(collections) == len(facecolors) + for patch, color in zip(collections, facecolors): + if 
isinstance(patch, Collection): + # returned as list of np.array + result = patch.get_facecolor()[0] + else: + result = patch.get_facecolor() + + if isinstance(result, np.ndarray): + result = tuple(result) + + expected = conv.to_rgba(color) + assert result == expected + + +def _check_text_labels(texts, expected): + """ + Check each text has expected labels + + Parameters + ---------- + texts : matplotlib Text object, or its list-like + target text, or its list + expected : str or list-like which has the same length as texts + expected text label, or its list + """ + if not is_list_like(texts): + assert texts.get_text() == expected + else: + labels = [t.get_text() for t in texts] + assert len(labels) == len(expected) + for label, e in zip(labels, expected): + assert label == e + + +def _check_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None): + """ + Check each axes has expected tick properties + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xlabelsize : number + expected xticks font size + xrot : number + expected xticks rotation + ylabelsize : number + expected yticks font size + yrot : number + expected yticks rotation + """ + from matplotlib.ticker import NullFormatter + + axes = _flatten_visible(axes) + for ax in axes: + if xlabelsize is not None or xrot is not None: + if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter): + # If minor ticks has NullFormatter, rot / fontsize are not + # retained + labels = ax.get_xticklabels() + else: + labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True) + + for label in labels: + if xlabelsize is not None: + tm.assert_almost_equal(label.get_fontsize(), xlabelsize) + if xrot is not None: + tm.assert_almost_equal(label.get_rotation(), xrot) + + if ylabelsize is not None or yrot is not None: + if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter): + labels = ax.get_yticklabels() + else: + labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True) + + for label in labels: + if ylabelsize is not None: + tm.assert_almost_equal(label.get_fontsize(), ylabelsize) + if yrot is not None: + tm.assert_almost_equal(label.get_rotation(), yrot) + + +def _check_ax_scales(axes, xaxis="linear", yaxis="linear"): + """ + Check each axes has expected scales + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xaxis : {'linear', 'log'} + expected xaxis scale + yaxis : {'linear', 'log'} + expected yaxis scale + """ + axes = _flatten_visible(axes) + for ax in axes: + assert ax.xaxis.get_scale() == xaxis + assert ax.yaxis.get_scale() == yaxis + + +def _check_axes_shape(axes, axes_num=None, layout=None, figsize=None): + """ + Check expected number of axes is drawn in expected layout + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + axes_num : number + expected number of axes. Unnecessary axes should be set to + invisible. + layout : tuple + expected layout, (expected number of rows , columns) + figsize : tuple + expected figsize. 
default is matplotlib default + """ + from pandas.plotting._matplotlib.tools import flatten_axes + + if figsize is None: + figsize = (6.4, 4.8) + visible_axes = _flatten_visible(axes) + + if axes_num is not None: + assert len(visible_axes) == axes_num + for ax in visible_axes: + # check something drawn on visible axes + assert len(ax.get_children()) > 0 + + if layout is not None: + x_set = set() + y_set = set() + for ax in flatten_axes(axes): + # check axes coordinates to estimate layout + points = ax.get_position().get_points() + x_set.add(points[0][0]) + y_set.add(points[0][1]) + result = (len(y_set), len(x_set)) + assert result == layout + + tm.assert_numpy_array_equal( + visible_axes[0].figure.get_size_inches(), + np.array(figsize, dtype=np.float64), + ) + + +def _flatten_visible(axes: Axes | Sequence[Axes]) -> Sequence[Axes]: + """ + Flatten axes, and filter only visible + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + + """ + from pandas.plotting._matplotlib.tools import flatten_axes + + axes_ndarray = flatten_axes(axes) + axes = [ax for ax in axes_ndarray if ax.get_visible()] + return axes + + +def _check_has_errorbars(axes, xerr=0, yerr=0): + """ + Check axes has expected number of errorbars + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xerr : number + expected number of x errorbar + yerr : number + expected number of y errorbar + """ + axes = _flatten_visible(axes) + for ax in axes: + containers = ax.containers + xerr_count = 0 + yerr_count = 0 + for c in containers: + has_xerr = getattr(c, "has_xerr", False) + has_yerr = getattr(c, "has_yerr", False) + if has_xerr: + xerr_count += 1 + if has_yerr: + yerr_count += 1 + assert xerr == xerr_count + assert yerr == yerr_count + + +def _check_box_return_type( + returned, return_type, expected_keys=None, check_ax_title=True +): + """ + Check box returned type is correct + + Parameters + ---------- + returned : object to be tested, returned from boxplot + return_type : str + return_type passed to boxplot + expected_keys : list-like, optional + group labels in subplot case. If not passed, + the function checks assuming boxplot uses single ax + check_ax_title : bool + Whether to check the ax.title is the same as expected_key + Intended to be checked by calling from ``boxplot``. + Normal ``plot`` doesn't attach ``ax.title``, it must be disabled. 
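+
+ Notes
+ -----
+ (Usage example added in review, mirroring how the boxplot tests call this
+ helper; not part of the upstream docstring.) ::
+
+ result = df.boxplot(return_type="both")
+ _check_box_return_type(result, "both")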
+ """ + from matplotlib.axes import Axes + + types = {"dict": dict, "axes": Axes, "both": tuple} + if expected_keys is None: + # should be fixed when the returning default is changed + if return_type is None: + return_type = "dict" + + assert isinstance(returned, types[return_type]) + if return_type == "both": + assert isinstance(returned.ax, Axes) + assert isinstance(returned.lines, dict) + else: + # should be fixed when the returning default is changed + if return_type is None: + for r in _flatten_visible(returned): + assert isinstance(r, Axes) + return + + assert isinstance(returned, Series) + + assert sorted(returned.keys()) == sorted(expected_keys) + for key, value in returned.items(): + assert isinstance(value, types[return_type]) + # check returned dict has correct mapping + if return_type == "axes": + if check_ax_title: + assert value.get_title() == key + elif return_type == "both": + if check_ax_title: + assert value.ax.get_title() == key + assert isinstance(value.ax, Axes) + assert isinstance(value.lines, dict) + elif return_type == "dict": + line = value["medians"][0] + axes = line.axes + if check_ax_title: + assert axes.get_title() == key + else: + raise AssertionError + + +def _check_grid_settings(obj, kinds, kws={}): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + + import matplotlib as mpl + + def is_grid_on(): + xticks = mpl.pyplot.gca().xaxis.get_major_ticks() + yticks = mpl.pyplot.gca().yaxis.get_major_ticks() + xoff = all(not g.gridline.get_visible() for g in xticks) + yoff = all(not g.gridline.get_visible() for g in yticks) + + return not (xoff and yoff) + + spndx = 1 + for kind in kinds: + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=False) + obj.plot(kind=kind, **kws) + assert not is_grid_on() + mpl.pyplot.clf() + + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=True) + obj.plot(kind=kind, grid=False, **kws) + assert not is_grid_on() + mpl.pyplot.clf() + + if kind not in ["pie", "hexbin", "scatter"]: + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=True) + obj.plot(kind=kind, **kws) + assert is_grid_on() + mpl.pyplot.clf() + + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=False) + obj.plot(kind=kind, grid=True, **kws) + assert is_grid_on() + mpl.pyplot.clf() + + +def _unpack_cycler(rcParams, field="color"): + """ + Auxiliary function for correctly unpacking cycler after MPL >= 1.5 + """ + return [v[field] for v in rcParams["axes.prop_cycle"]] + + +def get_x_axis(ax): + return ax._shared_axes["x"] + + +def get_y_axis(ax): + return ax._shared_axes["y"] + + +def _check_plot_works(f, default_axes=False, **kwargs): + """ + Create plot and ensure that plot return object is valid. + + Parameters + ---------- + f : func + Plotting function. + default_axes : bool, optional + If False (default): + - If `ax` not in `kwargs`, then create subplot(211) and plot there + - Create new subplot(212) and plot there as well + - Mind special corner case for bootstrap_plot (see `_gen_two_subplots`) + If True: + - Simply run plotting function with kwargs provided + - All required axes instances will be created automatically + - It is recommended to use it when the plotting function + creates multiple axes itself. It helps avoid warnings like + 'UserWarning: To output multiple subplots, + the figure containing the passed axes is being cleared' + **kwargs + Keyword arguments passed to the plotting function. 
+ + Returns + ------- + Plot object returned by the last plotting. + """ + import matplotlib.pyplot as plt + + if default_axes: + gen_plots = _gen_default_plot + else: + gen_plots = _gen_two_subplots + + ret = None + try: + fig = kwargs.get("figure", plt.gcf()) + plt.clf() + + for ret in gen_plots(f, fig, **kwargs): + tm.assert_is_valid_plot_return_object(ret) + + finally: + plt.close(fig) + + return ret + + +def _gen_default_plot(f, fig, **kwargs): + """ + Create plot in a default way. + """ + yield f(**kwargs) + + +def _gen_two_subplots(f, fig, **kwargs): + """ + Create plot on two subplots forcefully created. + """ + if "ax" not in kwargs: + fig.add_subplot(211) + yield f(**kwargs) + + if f is pd.plotting.bootstrap_plot: + assert "ax" not in kwargs + else: + kwargs["ax"] = fig.add_subplot(212) + yield f(**kwargs) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..d688bbd47595c2ec6451bd9ddf7c916275013384 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py @@ -0,0 +1,56 @@ +import gc + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + to_datetime, +) + + +@pytest.fixture(autouse=True) +def mpl_cleanup(): + # matplotlib/testing/decorators.py#L24 + # 1) Resets units registry + # 2) Resets rc_context + # 3) Closes all figures + mpl = pytest.importorskip("matplotlib") + mpl_units = pytest.importorskip("matplotlib.units") + plt = pytest.importorskip("matplotlib.pyplot") + orig_units_registry = mpl_units.registry.copy() + with mpl.rc_context(): + mpl.use("template") + yield + mpl_units.registry.clear() + mpl_units.registry.update(orig_units_registry) + plt.close("all") + # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501 + gc.collect(1) + + +@pytest.fixture +def hist_df(): + n = 50 + rng = np.random.default_rng(10) + gender = rng.choice(["Male", "Female"], size=n) + classroom = rng.choice(["A", "B", "C"], size=n) + + hist_df = DataFrame( + { + "gender": gender, + "classroom": classroom, + "height": rng.normal(66, 4, size=n), + "weight": rng.normal(161, 32, size=n), + "category": rng.integers(4, size=n), + "datetime": to_datetime( + rng.integers( + 812419200000000000, + 819331200000000000, + size=n, + dtype=np.int64, + ) + ), + } + ) + return hist_df diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..8a8643415ae12f96bbbd87ed85ff74f8813b07e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py @@ -0,0 +1,98 @@ +import sys +import types + +import pytest + +import pandas.util._test_decorators as td + +import pandas + + +@pytest.fixture +def dummy_backend(): + db = types.ModuleType("pandas_dummy_backend") + setattr(db, "plot", lambda *args, **kwargs: "used_dummy") + return db + + +@pytest.fixture +def restore_backend(): + """Restore the plotting backend to matplotlib""" + with pandas.option_context("plotting.backend", "matplotlib"): + yield + + +def test_backend_is_not_module(): + msg = "Could not find plotting backend 'not_an_existing_module'." 
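+ # (Explanatory note added in review; not part of the upstream test.)
+ # Backend lookup first consults the "pandas_plotting_backends" entry-point
+ # group and then falls back to importing the option value as a module that
+ # exposes a ``plot`` callable (see the dummy_backend fixture above), so an
+ # unknown name is rejected before the option value is changed, which is what
+ # the assertion below verifies.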
+ with pytest.raises(ValueError, match=msg): + pandas.set_option("plotting.backend", "not_an_existing_module") + + assert pandas.options.plotting.backend == "matplotlib" + + +def test_backend_is_correct(monkeypatch, restore_backend, dummy_backend): + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + + pandas.set_option("plotting.backend", "pandas_dummy_backend") + assert pandas.get_option("plotting.backend") == "pandas_dummy_backend" + assert ( + pandas.plotting._core._get_plot_backend("pandas_dummy_backend") is dummy_backend + ) + + +def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend, dummy_backend): + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + df = pandas.DataFrame([1, 2, 3]) + + assert pandas.get_option("plotting.backend") == "matplotlib" + assert df.plot(backend="pandas_dummy_backend") == "used_dummy" + + +def test_register_entrypoint(restore_backend, tmp_path, monkeypatch, dummy_backend): + monkeypatch.syspath_prepend(tmp_path) + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + + dist_info = tmp_path / "my_backend-0.0.0.dist-info" + dist_info.mkdir() + # entry_point name should not match module name - otherwise pandas will + # fall back to backend lookup by module name + (dist_info / "entry_points.txt").write_bytes( + b"[pandas_plotting_backends]\nmy_ep_backend = pandas_dummy_backend\n" + ) + + assert pandas.plotting._core._get_plot_backend("my_ep_backend") is dummy_backend + + with pandas.option_context("plotting.backend", "my_ep_backend"): + assert pandas.plotting._core._get_plot_backend() is dummy_backend + + +def test_setting_backend_without_plot_raises(monkeypatch): + # GH-28163 + module = types.ModuleType("pandas_plot_backend") + monkeypatch.setitem(sys.modules, "pandas_plot_backend", module) + + assert pandas.options.plotting.backend == "matplotlib" + with pytest.raises( + ValueError, match="Could not find plotting backend 'pandas_plot_backend'." + ): + pandas.set_option("plotting.backend", "pandas_plot_backend") + + assert pandas.options.plotting.backend == "matplotlib" + + +@td.skip_if_installed("matplotlib") +def test_no_matplotlib_ok(): + msg = ( + 'matplotlib is required for plotting when the default backend "matplotlib" is ' + "selected." 
+ ) + with pytest.raises(ImportError, match=msg): + pandas.plotting._core._get_plot_backend("matplotlib") + + +def test_extra_kinds_ok(monkeypatch, restore_backend, dummy_backend): + # https://github.com/pandas-dev/pandas/pull/28647 + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + pandas.set_option("plotting.backend", "pandas_dummy_backend") + df = pandas.DataFrame({"A": [1, 2, 3]}) + df.plot(kind="not a real kind") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py new file mode 100644 index 0000000000000000000000000000000000000000..76f7fa1f22eec4bdb7464619226352c918d31a02 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py @@ -0,0 +1,761 @@ +""" Test cases for .boxplot method """ + +import itertools +import string + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, + date_range, + plotting, + timedelta_range, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_box_return_type, + _check_plot_works, + _check_ticks_props, + _check_visible, +) + +from pandas.io.formats.printing import pprint_thing + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +def _check_ax_limits(col, ax): + y_min, y_max = ax.get_ylim() + assert y_min <= col.min() + assert y_max >= col.max() + + +class TestDataFramePlots: + def test_stacked_boxplot_set_axis(self): + # GH2980 + import matplotlib.pyplot as plt + + n = 80 + df = DataFrame( + { + "Clinical": np.random.default_rng(2).choice([0, 1, 2, 3], n), + "Confirmed": np.random.default_rng(2).choice([0, 1, 2, 3], n), + "Discarded": np.random.default_rng(2).choice([0, 1, 2, 3], n), + }, + index=np.arange(0, n), + ) + ax = df.plot(kind="bar", stacked=True) + assert [int(x.get_text()) for x in ax.get_xticklabels()] == df.index.to_list() + ax.set_xticks(np.arange(0, 80, 10)) + plt.draw() # Update changes + assert [int(x.get_text()) for x in ax.get_xticklabels()] == list( + np.arange(0, 80, 10) + ) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs, warn", + [ + [{"return_type": "dict"}, None], + [{"column": ["one", "two"]}, None], + [{"column": ["one", "two"], "by": "indic"}, UserWarning], + [{"column": ["one"], "by": ["indic", "indic2"]}, None], + [{"by": "indic"}, UserWarning], + [{"by": ["indic", "indic2"]}, UserWarning], + [{"notch": 1}, None], + [{"by": "indic", "notch": 1}, UserWarning], + ], + ) + def test_boxplot_legacy1(self, kwargs, warn): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + df["indic"] = ["foo", "bar"] * 3 + df["indic2"] = ["foo", "bar", "foo"] * 2 + + # _check_plot_works can add an ax so catch warning. 
see GH #13188 + with tm.assert_produces_warning(warn, check_stacklevel=False): + _check_plot_works(df.boxplot, **kwargs) + + def test_boxplot_legacy1_series(self): + ser = Series(np.random.default_rng(2).standard_normal(6)) + _check_plot_works(plotting._core.boxplot, data=ser, return_type="dict") + + def test_boxplot_legacy2(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(df.boxplot, by="X") + + def test_boxplot_legacy2_with_ax(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + # When ax is supplied and required number of axes is 1, + # passed ax should be used: + _, ax = mpl.pyplot.subplots() + axes = df.boxplot("Col1", by="X", ax=ax) + ax_axes = ax.axes + assert ax_axes is axes + + def test_boxplot_legacy2_with_ax_return_type(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + fig, ax = mpl.pyplot.subplots() + axes = df.groupby("Y").boxplot(ax=ax, return_type="axes") + ax_axes = ax.axes + assert ax_axes is axes["A"] + + def test_boxplot_legacy2_with_multi_col(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + # Multiple columns with an ax argument should use same figure + fig, ax = mpl.pyplot.subplots() + with tm.assert_produces_warning(UserWarning): + axes = df.boxplot( + column=["Col1", "Col2"], by="X", ax=ax, return_type="axes" + ) + assert axes["Col1"].get_figure() is fig + + def test_boxplot_legacy2_by_none(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + # When by is None, check that all relevant lines are present in the + # dict + _, ax = mpl.pyplot.subplots() + d = df.boxplot(ax=ax, return_type="dict") + lines = list(itertools.chain.from_iterable(d.values())) + assert len(ax.get_lines()) == len(lines) + + def test_boxplot_return_type_none(self, hist_df): + # GH 12216; return_type=None & by=None -> axes + result = hist_df.boxplot() + assert isinstance(result, mpl.pyplot.Axes) + + def test_boxplot_return_type_legacy(self): + # API change in https://github.com/pandas-dev/pandas/pull/7096 + + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + msg = "return_type must be {'axes', 'dict', 'both'}" + with pytest.raises(ValueError, match=msg): + df.boxplot(return_type="NOT_A_TYPE") + + result = df.boxplot() + _check_box_return_type(result, "axes") + + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_boxplot_return_type_legacy_return_type(self, return_type): + # API change in https://github.com/pandas-dev/pandas/pull/7096 + + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + with tm.assert_produces_warning(False): + 
result = df.boxplot(return_type=return_type) + _check_box_return_type(result, return_type) + + def test_boxplot_axis_limits(self, hist_df): + df = hist_df.copy() + df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0]) + # One full row + height_ax, weight_ax = df.boxplot(["height", "weight"], by="category") + _check_ax_limits(df["height"], height_ax) + _check_ax_limits(df["weight"], weight_ax) + assert weight_ax._sharey == height_ax + + def test_boxplot_axis_limits_two_rows(self, hist_df): + df = hist_df.copy() + df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0]) + # Two rows, one partial + p = df.boxplot(["height", "weight", "age"], by="category") + height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0] + dummy_ax = p[1, 1] + + _check_ax_limits(df["height"], height_ax) + _check_ax_limits(df["weight"], weight_ax) + _check_ax_limits(df["age"], age_ax) + assert weight_ax._sharey == height_ax + assert age_ax._sharey == height_ax + assert dummy_ax._sharey is None + + def test_boxplot_empty_column(self): + df = DataFrame(np.random.default_rng(2).standard_normal((20, 4))) + df.loc[:, 0] = np.nan + _check_plot_works(df.boxplot, return_type="axes") + + def test_figsize(self): + df = DataFrame( + np.random.default_rng(2).random((10, 5)), columns=["A", "B", "C", "D", "E"] + ) + result = df.boxplot(return_type="axes", figsize=(12, 8)) + assert result.figure.bbox_inches.width == 12 + assert result.figure.bbox_inches.height == 8 + + def test_fontsize(self): + df = DataFrame({"a": [1, 2, 3, 4, 5, 6]}) + _check_ticks_props(df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16) + + def test_boxplot_numeric_data(self): + # GH 22799 + df = DataFrame( + { + "a": date_range("2012-01-01", periods=100), + "b": np.random.default_rng(2).standard_normal(100), + "c": np.random.default_rng(2).standard_normal(100) + 2, + "d": date_range("2012-01-01", periods=100).astype(str), + "e": date_range("2012-01-01", periods=100, tz="UTC"), + "f": timedelta_range("1 days", periods=100), + } + ) + ax = df.plot(kind="box") + assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"] + + @pytest.mark.parametrize( + "colors_kwd, expected", + [ + ( + {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"}, + {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"}, + ), + ({"boxes": "r"}, {"boxes": "r"}), + ("r", {"boxes": "r", "whiskers": "r", "medians": "r", "caps": "r"}), + ], + ) + def test_color_kwd(self, colors_kwd, expected): + # GH: 26214 + df = DataFrame(np.random.default_rng(2).random((10, 2))) + result = df.boxplot(color=colors_kwd, return_type="dict") + for k, v in expected.items(): + assert result[k][0].get_color() == v + + @pytest.mark.parametrize( + "scheme,expected", + [ + ( + "dark_background", + { + "boxes": "#8dd3c7", + "whiskers": "#8dd3c7", + "medians": "#bfbbd9", + "caps": "#8dd3c7", + }, + ), + ( + "default", + { + "boxes": "#1f77b4", + "whiskers": "#1f77b4", + "medians": "#2ca02c", + "caps": "#1f77b4", + }, + ), + ], + ) + def test_colors_in_theme(self, scheme, expected): + # GH: 40769 + df = DataFrame(np.random.default_rng(2).random((10, 2))) + import matplotlib.pyplot as plt + + plt.style.use(scheme) + result = df.plot.box(return_type="dict") + for k, v in expected.items(): + assert result[k][0].get_color() == v + + @pytest.mark.parametrize( + "dict_colors, msg", + [({"boxes": "r", "invalid_key": "r"}, "invalid key 'invalid_key'")], + ) + def test_color_kwd_errors(self, dict_colors, msg): + # GH: 26214 + df = DataFrame(np.random.default_rng(2).random((10, 
2))) + with pytest.raises(ValueError, match=msg): + df.boxplot(color=dict_colors, return_type="dict") + + @pytest.mark.parametrize( + "props, expected", + [ + ("boxprops", "boxes"), + ("whiskerprops", "whiskers"), + ("capprops", "caps"), + ("medianprops", "medians"), + ], + ) + def test_specified_props_kwd(self, props, expected): + # GH 30346 + df = DataFrame({k: np.random.default_rng(2).random(10) for k in "ABC"}) + kwd = {props: {"color": "C1"}} + result = df.boxplot(return_type="dict", **kwd) + + assert result[expected][0].get_color() == "C1" + + @pytest.mark.parametrize("vert", [True, False]) + def test_plot_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + xlabel, ylabel = "x", "y" + ax = df.plot(kind="box", vert=vert, xlabel=xlabel, ylabel=ylabel) + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + + @pytest.mark.parametrize("vert", [True, False]) + def test_plot_box(self, vert): + # GH 54941 + rng = np.random.default_rng(2) + df1 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD")) + df2 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD")) + + xlabel, ylabel = "x", "y" + _, axs = plt.subplots(ncols=2, figsize=(10, 7), sharey=True) + df1.plot.box(ax=axs[0], vert=vert, xlabel=xlabel, ylabel=ylabel) + df2.plot.box(ax=axs[1], vert=vert, xlabel=xlabel, ylabel=ylabel) + for ax in axs: + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + mpl.pyplot.close() + + @pytest.mark.parametrize("vert", [True, False]) + def test_boxplot_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + xlabel, ylabel = "x", "y" + ax = df.boxplot(vert=vert, xlabel=xlabel, ylabel=ylabel) + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + + @pytest.mark.parametrize("vert", [True, False]) + def test_boxplot_group_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + xlabel, ylabel = "x", "y" + ax = df.boxplot(by="group", vert=vert, xlabel=xlabel, ylabel=ylabel) + for subplot in ax: + assert subplot.get_xlabel() == xlabel + assert subplot.get_ylabel() == ylabel + mpl.pyplot.close() + + @pytest.mark.parametrize("vert", [True, False]) + def test_boxplot_group_no_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + ax = df.boxplot(by="group", vert=vert) + for subplot in ax: + target_label = subplot.get_xlabel() if vert else subplot.get_ylabel() + assert target_label == pprint_thing(["group"]) + mpl.pyplot.close() + + +class TestDataFrameGroupByPlots: + def test_boxplot_legacy1(self, hist_df): + grouped = hist_df.groupby(by="gender") + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(grouped.boxplot, return_type="axes") + _check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2)) + + def test_boxplot_legacy1_return_type(self, hist_df): + grouped = 
hist_df.groupby(by="gender") + axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes") + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.slow + def test_boxplot_legacy2(self): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + grouped = df.groupby(level=1) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(grouped.boxplot, return_type="axes") + _check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3)) + + @pytest.mark.slow + def test_boxplot_legacy2_return_type(self): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + grouped = df.groupby(level=1) + axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes") + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.parametrize( + "subplots, warn, axes_num, layout", + [[True, UserWarning, 3, (2, 2)], [False, None, 1, (1, 1)]], + ) + def test_boxplot_legacy3(self, subplots, warn, axes_num, layout): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = df.unstack(level=1).groupby(level=0, axis=1) + with tm.assert_produces_warning(warn, check_stacklevel=False): + axes = _check_plot_works( + grouped.boxplot, subplots=subplots, return_type="axes" + ) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + def test_grouped_plot_fignums(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + df = DataFrame({"height": height, "weight": weight, "gender": gender}) + gb = df.groupby("gender") + + res = gb.plot() + assert len(mpl.pyplot.get_fignums()) == 2 + assert len(res) == 2 + plt.close("all") + + res = gb.boxplot(return_type="axes") + assert len(mpl.pyplot.get_fignums()) == 1 + assert len(res) == 2 + + def test_grouped_plot_fignums_excluded_col(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + df = DataFrame({"height": height, "weight": weight, "gender": gender}) + # now works with GH 5610 as gender is excluded + df.groupby("gender").hist() + + @pytest.mark.slow + def test_grouped_box_return_type(self, hist_df): + df = hist_df + + # old style: return_type=None + result = df.boxplot(by="gender") + assert isinstance(result, np.ndarray) + _check_box_return_type( + result, None, expected_keys=["height", "weight", "category"] + ) + + @pytest.mark.slow + def test_grouped_box_return_type_groupby(self, hist_df): + df = hist_df + # now for groupby + result = df.groupby("gender").boxplot(return_type="dict") + _check_box_return_type(result, "dict", expected_keys=["Male", "Female"]) + + @pytest.mark.slow + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_grouped_box_return_type_arg(self, hist_df, return_type): + df = hist_df + + returned = df.groupby("classroom").boxplot(return_type=return_type) + 
_check_box_return_type(returned, return_type, expected_keys=["A", "B", "C"]) + + returned = df.boxplot(by="classroom", return_type=return_type) + _check_box_return_type( + returned, return_type, expected_keys=["height", "weight", "category"] + ) + + @pytest.mark.slow + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_grouped_box_return_type_arg_duplcate_cats(self, return_type): + columns2 = "X B C D A".split() + df2 = DataFrame( + np.random.default_rng(2).standard_normal((6, 5)), columns=columns2 + ) + categories2 = "A B".split() + df2["category"] = categories2 * 3 + + returned = df2.groupby("category").boxplot(return_type=return_type) + _check_box_return_type(returned, return_type, expected_keys=categories2) + + returned = df2.boxplot(by="category", return_type=return_type) + _check_box_return_type(returned, return_type, expected_keys=columns2) + + @pytest.mark.slow + def test_grouped_box_layout_too_small(self, hist_df): + df = hist_df + + msg = "Layout of 1x1 must be larger than required size 2" + with pytest.raises(ValueError, match=msg): + df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1)) + + @pytest.mark.slow + def test_grouped_box_layout_needs_by(self, hist_df): + df = hist_df + msg = "The 'layout' keyword is not supported when 'by' is None" + with pytest.raises(ValueError, match=msg): + df.boxplot( + column=["height", "weight", "category"], + layout=(2, 1), + return_type="dict", + ) + + @pytest.mark.slow + def test_grouped_box_layout_positive_layout(self, hist_df): + df = hist_df + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "gb_key, axes_num, rows", + [["gender", 2, 1], ["category", 4, 2], ["classroom", 3, 2]], + ) + def test_grouped_box_layout_positive_layout_axes( + self, hist_df, gb_key, axes_num, rows + ): + df = hist_df + # _check_plot_works adds an ax so catch warning. 
see GH #13188 GH 6769 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works( + df.groupby(gb_key).boxplot, column="height", return_type="dict" + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=axes_num, layout=(rows, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "col, visible", [["height", False], ["weight", True], ["category", True]] + ) + def test_grouped_box_layout_visible(self, hist_df, col, visible): + df = hist_df + # GH 5897 + axes = df.boxplot( + column=["height", "weight", "category"], by="gender", return_type="axes" + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2)) + ax = axes[col] + _check_visible(ax.get_xticklabels(), visible=visible) + _check_visible([ax.xaxis.get_label()], visible=visible) + + @pytest.mark.slow + def test_grouped_box_layout_shape(self, hist_df): + df = hist_df + df.groupby("classroom").boxplot( + column=["height", "weight", "category"], return_type="dict" + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize("cols", [2, -1]) + def test_grouped_box_layout_works(self, hist_df, cols): + df = hist_df + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works( + df.groupby("category").boxplot, + column="height", + layout=(3, cols), + return_type="dict", + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(3, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize("rows, res", [[4, 4], [-1, 3]]) + def test_grouped_box_layout_axes_shape_rows(self, hist_df, rows, res): + df = hist_df + df.boxplot( + column=["height", "weight", "category"], by="gender", layout=(rows, 1) + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(res, 1)) + + @pytest.mark.slow + @pytest.mark.parametrize("cols, res", [[4, 4], [-1, 3]]) + def test_grouped_box_layout_axes_shape_cols_groupby(self, hist_df, cols, res): + df = hist_df + df.groupby("classroom").boxplot( + column=["height", "weight", "category"], + layout=(1, cols), + return_type="dict", + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(1, res)) + + @pytest.mark.slow + def test_grouped_box_multiple_axes(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + + # check warning to ignore sharex / sharey + # this check should be done in the first function which + # passes multiple axes to plot, hist or boxplot + # location should be changed if other test is added + # which has earlier alphabetical order + with tm.assert_produces_warning(UserWarning): + _, axes = mpl.pyplot.subplots(2, 2) + df.groupby("category").boxplot(column="height", return_type="axes", ax=axes) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(2, 2)) + + @pytest.mark.slow + def test_grouped_box_multiple_axes_on_fig(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + fig, axes = mpl.pyplot.subplots(2, 3) + with tm.assert_produces_warning(UserWarning): + returned = df.boxplot( + column=["height", "weight", "category"], + by="gender", + return_type="axes", + ax=axes[0], + ) + returned = np.array(list(returned.values)) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[0]) + assert returned[0].figure is fig + + # draw on second row + with tm.assert_produces_warning(UserWarning): + returned = df.groupby("classroom").boxplot( + column=["height", "weight", "category"], return_type="axes", ax=axes[1] + ) + returned = np.array(list(returned.values)) + _check_axes_shape(returned, axes_num=3, 
layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[1]) + assert returned[0].figure is fig + + @pytest.mark.slow + def test_grouped_box_multiple_axes_ax_error(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + msg = "The number of passed axes must be 3, the same as the output plot" + with pytest.raises(ValueError, match=msg): + fig, axes = mpl.pyplot.subplots(2, 3) + # pass different number of axes from required + with tm.assert_produces_warning(UserWarning): + axes = df.groupby("classroom").boxplot(ax=axes) + + def test_fontsize(self): + df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]}) + _check_ticks_props( + df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16 + ) + + @pytest.mark.parametrize( + "col, expected_xticklabel", + [ + ("v", ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]), + (["v"], ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]), + ("v1", ["(a, v1)", "(b, v1)", "(c, v1)", "(d, v1)", "(e, v1)"]), + ( + ["v", "v1"], + [ + "(a, v)", + "(a, v1)", + "(b, v)", + "(b, v1)", + "(c, v)", + "(c, v1)", + "(d, v)", + "(d, v1)", + "(e, v)", + "(e, v1)", + ], + ), + ( + None, + [ + "(a, v)", + "(a, v1)", + "(b, v)", + "(b, v1)", + "(c, v)", + "(c, v1)", + "(d, v)", + "(d, v1)", + "(e, v)", + "(e, v1)", + ], + ), + ], + ) + def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel): + # GH 16748 + df = DataFrame( + { + "cat": np.random.default_rng(2).choice(list("abcde"), 100), + "v": np.random.default_rng(2).random(100), + "v1": np.random.default_rng(2).random(100), + } + ) + grouped = df.groupby("cat") + + axes = _check_plot_works( + grouped.boxplot, subplots=False, column=col, return_type="axes" + ) + + result_xticklabel = [x.get_text() for x in axes.get_xticklabels()] + assert expected_xticklabel == result_xticklabel + + def test_groupby_boxplot_object(self, hist_df): + # GH 43480 + df = hist_df.astype("object") + grouped = df.groupby("gender") + msg = "boxplot method requires numerical columns, nothing to plot" + with pytest.raises(ValueError, match=msg): + _check_plot_works(grouped.boxplot, subplots=False) + + def test_boxplot_multiindex_column(self): + # GH 16748 + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + tuples = list(zip(*arrays)) + index = MultiIndex.from_tuples(tuples, names=["first", "second"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 8)), + index=["A", "B", "C"], + columns=index, + ) + + col = [("bar", "one"), ("bar", "two")] + axes = _check_plot_works(df.boxplot, column=col, return_type="axes") + + expected_xticklabel = ["(bar, one)", "(bar, two)"] + result_xticklabel = [x.get_text() for x in axes.get_xticklabels()] + assert expected_xticklabel == result_xticklabel diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..20daf5935624843af3224f991497f84fa6639a0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py @@ -0,0 +1,60 @@ +import pytest + +from pandas import DataFrame +from pandas.tests.plotting.common import ( + _check_plot_works, + _check_ticks_props, + _gen_two_subplots, +) + +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestCommon: + def test__check_ticks_props(self): + # GH 34768 + df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]}) + ax = _check_plot_works(df.plot, 
rot=30) + ax.yaxis.set_tick_params(rotation=30) + msg = "expected 0.00000 but got " + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, xrot=0) + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, xlabelsize=0) + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, yrot=0) + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, ylabelsize=0) + + def test__gen_two_subplots_with_ax(self): + fig = plt.gcf() + gen = _gen_two_subplots(f=lambda **kwargs: None, fig=fig, ax="test") + # On the first yield, no subplot should be added since ax was passed + next(gen) + assert fig.get_axes() == [] + # On the second, the one axis should match fig.subplot(2, 1, 2) + next(gen) + axes = fig.get_axes() + assert len(axes) == 1 + subplot_geometry = list(axes[0].get_subplotspec().get_geometry()[:-1]) + subplot_geometry[-1] += 1 + assert subplot_geometry == [2, 1, 2] + + def test_colorbar_layout(self): + fig = plt.figure() + + axes = fig.subplot_mosaic( + """ + AB + CC + """ + ) + + x = [1, 2, 3] + y = [1, 2, 3] + + cs0 = axes["A"].scatter(x, y) + axes["B"].scatter(x, y) + + fig.colorbar(cs0, ax=[axes["A"], axes["B"]], location="right") + DataFrame(x).plot(ax=axes["C"]) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..f748d7c5fc758045fc5d3475b94e376a06f5269b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py @@ -0,0 +1,410 @@ +from datetime import ( + date, + datetime, +) +import subprocess +import sys + +import numpy as np +import pytest + +import pandas._config.config as cf + +from pandas._libs.tslibs import to_offset + +from pandas import ( + Index, + Period, + PeriodIndex, + Series, + Timestamp, + arrays, + date_range, +) +import pandas._testing as tm + +from pandas.plotting import ( + deregister_matplotlib_converters, + register_matplotlib_converters, +) +from pandas.tseries.offsets import ( + Day, + Micro, + Milli, + Second, +) + +try: + from pandas.plotting._matplotlib import converter +except ImportError: + # try / except, rather than skip, to avoid internal refactoring + # causing an improper skip + pass + +pytest.importorskip("matplotlib.pyplot") +dates = pytest.importorskip("matplotlib.dates") + + +@pytest.mark.single_cpu +def test_registry_mpl_resets(): + # Check that Matplotlib converters are properly reset (see issue #27481) + code = ( + "import matplotlib.units as units; " + "import matplotlib.dates as mdates; " + "n_conv = len(units.registry); " + "import pandas as pd; " + "pd.plotting.register_matplotlib_converters(); " + "pd.plotting.deregister_matplotlib_converters(); " + "assert len(units.registry) == n_conv" + ) + call = [sys.executable, "-c", code] + subprocess.check_output(call) + + +def test_timtetonum_accepts_unicode(): + assert converter.time2num("00:01") == converter.time2num("00:01") + + +class TestRegistration: + @pytest.mark.single_cpu + def test_dont_register_by_default(self): + # Run in subprocess to ensure a clean state + code = ( + "import matplotlib.units; " + "import pandas as pd; " + "units = dict(matplotlib.units.registry); " + "assert pd.Timestamp not in units" + ) + call = [sys.executable, "-c", code] + assert subprocess.check_call(call) == 0 + + def test_registering_no_warning(self): + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), 
index=date_range("2017", periods=12)) + _, ax = plt.subplots() + + # Set to the "warn" state, in case this isn't the first test run + register_matplotlib_converters() + ax.plot(s.index, s.values) + plt.close() + + def test_pandas_plots_register(self): + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range("2017", periods=12)) + # Set to the "warn" state, in case this isn't the first test run + with tm.assert_produces_warning(None) as w: + s.plot() + + try: + assert len(w) == 0 + finally: + plt.close() + + def test_matplotlib_formatters(self): + units = pytest.importorskip("matplotlib.units") + + # Can't make any assertion about the start state. + # We we check that toggling converters off removes it, and toggling it + # on restores it. + + with cf.option_context("plotting.matplotlib.register_converters", True): + with cf.option_context("plotting.matplotlib.register_converters", False): + assert Timestamp not in units.registry + assert Timestamp in units.registry + + def test_option_no_warning(self): + pytest.importorskip("matplotlib.pyplot") + ctx = cf.option_context("plotting.matplotlib.register_converters", False) + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range("2017", periods=12)) + _, ax = plt.subplots() + + # Test without registering first, no warning + with ctx: + ax.plot(s.index, s.values) + + # Now test with registering + register_matplotlib_converters() + with ctx: + ax.plot(s.index, s.values) + plt.close() + + def test_registry_resets(self): + units = pytest.importorskip("matplotlib.units") + dates = pytest.importorskip("matplotlib.dates") + + # make a copy, to reset to + original = dict(units.registry) + + try: + # get to a known state + units.registry.clear() + date_converter = dates.DateConverter() + units.registry[datetime] = date_converter + units.registry[date] = date_converter + + register_matplotlib_converters() + assert units.registry[date] is not date_converter + deregister_matplotlib_converters() + assert units.registry[date] is date_converter + + finally: + # restore original stater + units.registry.clear() + for k, v in original.items(): + units.registry[k] = v + + +class TestDateTimeConverter: + @pytest.fixture + def dtc(self): + return converter.DatetimeConverter() + + def test_convert_accepts_unicode(self, dtc): + r1 = dtc.convert("2000-01-01 12:22", None, None) + r2 = dtc.convert("2000-01-01 12:22", None, None) + assert r1 == r2, "DatetimeConverter.convert should accept unicode" + + def test_conversion(self, dtc): + rs = dtc.convert(["2012-1-1"], None, None)[0] + xp = dates.date2num(datetime(2012, 1, 1)) + assert rs == xp + + rs = dtc.convert("2012-1-1", None, None) + assert rs == xp + + rs = dtc.convert(date(2012, 1, 1), None, None) + assert rs == xp + + rs = dtc.convert("2012-1-1", None, None) + assert rs == xp + + rs = dtc.convert(Timestamp("2012-1-1"), None, None) + assert rs == xp + + # also testing datetime64 dtype (GH8614) + rs = dtc.convert("2012-01-01", None, None) + assert rs == xp + + rs = dtc.convert("2012-01-01 00:00:00+0000", None, None) + assert rs == xp + + rs = dtc.convert( + np.array(["2012-01-01 00:00:00+0000", "2012-01-02 00:00:00+0000"]), + None, + None, + ) + assert rs[0] == xp + + # we have a tz-aware date (constructed to that when we turn to utc it + # is the same as our sample) + ts = Timestamp("2012-01-01").tz_localize("UTC").tz_convert("US/Eastern") + rs = dtc.convert(ts, None, None) + assert rs == xp + + rs = dtc.convert(ts.to_pydatetime(), None, None) + 
assert rs == xp + + rs = dtc.convert(Index([ts - Day(1), ts]), None, None) + assert rs[1] == xp + + rs = dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(), None, None) + assert rs[1] == xp + + def test_conversion_float(self, dtc): + rtol = 0.5 * 10**-9 + + rs = dtc.convert(Timestamp("2012-1-1 01:02:03", tz="UTC"), None, None) + xp = converter.mdates.date2num(Timestamp("2012-1-1 01:02:03", tz="UTC")) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + rs = dtc.convert( + Timestamp("2012-1-1 09:02:03", tz="Asia/Hong_Kong"), None, None + ) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + rs = dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + @pytest.mark.parametrize( + "values", + [ + [date(1677, 1, 1), date(1677, 1, 2)], + [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)], + ], + ) + def test_conversion_outofbounds_datetime(self, dtc, values): + # 2579 + rs = dtc.convert(values, None, None) + xp = converter.mdates.date2num(values) + tm.assert_numpy_array_equal(rs, xp) + rs = dtc.convert(values[0], None, None) + xp = converter.mdates.date2num(values[0]) + assert rs == xp + + @pytest.mark.parametrize( + "time,format_expected", + [ + (0, "00:00"), # time2num(datetime.time.min) + (86399.999999, "23:59:59.999999"), # time2num(datetime.time.max) + (90000, "01:00"), + (3723, "01:02:03"), + (39723.2, "11:02:03.200"), + ], + ) + def test_time_formatter(self, time, format_expected): + # issue 18478 + result = converter.TimeFormatter(None)(time) + assert result == format_expected + + @pytest.mark.parametrize("freq", ("B", "ms", "s")) + def test_dateindex_conversion(self, freq, dtc): + rtol = 10**-9 + dateindex = date_range("2020-01-01", periods=10, freq=freq) + rs = dtc.convert(dateindex, None, None) + xp = converter.mdates.date2num(dateindex._mpl_repr()) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + @pytest.mark.parametrize("offset", [Second(), Milli(), Micro(50)]) + def test_resolution(self, offset, dtc): + # Matplotlib's time representation using floats cannot distinguish + # intervals smaller than ~10 microsecond in the common range of years. 
+ ts1 = Timestamp("2012-1-1") + ts2 = ts1 + offset + val1 = dtc.convert(ts1, None, None) + val2 = dtc.convert(ts2, None, None) + if not val1 < val2: + raise AssertionError(f"{val1} is not less than {val2}.") + + def test_convert_nested(self, dtc): + inner = [Timestamp("2017-01-01"), Timestamp("2017-01-02")] + data = [inner, inner] + result = dtc.convert(data, None, None) + expected = [dtc.convert(x, None, None) for x in data] + assert (np.array(result) == expected).all() + + +class TestPeriodConverter: + @pytest.fixture + def pc(self): + return converter.PeriodConverter() + + @pytest.fixture + def axis(self): + class Axis: + pass + + axis = Axis() + axis.freq = "D" + return axis + + def test_convert_accepts_unicode(self, pc, axis): + r1 = pc.convert("2012-1-1", None, axis) + r2 = pc.convert("2012-1-1", None, axis) + assert r1 == r2 + + def test_conversion(self, pc, axis): + rs = pc.convert(["2012-1-1"], None, axis)[0] + xp = Period("2012-1-1").ordinal + assert rs == xp + + rs = pc.convert("2012-1-1", None, axis) + assert rs == xp + + rs = pc.convert([date(2012, 1, 1)], None, axis)[0] + assert rs == xp + + rs = pc.convert(date(2012, 1, 1), None, axis) + assert rs == xp + + rs = pc.convert([Timestamp("2012-1-1")], None, axis)[0] + assert rs == xp + + rs = pc.convert(Timestamp("2012-1-1"), None, axis) + assert rs == xp + + rs = pc.convert("2012-01-01", None, axis) + assert rs == xp + + rs = pc.convert("2012-01-01 00:00:00+0000", None, axis) + assert rs == xp + + rs = pc.convert( + np.array( + ["2012-01-01 00:00:00", "2012-01-02 00:00:00"], + dtype="datetime64[ns]", + ), + None, + axis, + ) + assert rs[0] == xp + + def test_integer_passthrough(self, pc, axis): + # GH9012 + rs = pc.convert([0, 1], None, axis) + xp = [0, 1] + assert rs == xp + + def test_convert_nested(self, pc, axis): + data = ["2012-1-1", "2012-1-2"] + r1 = pc.convert([data, data], None, axis) + r2 = [pc.convert(data, None, axis) for _ in range(2)] + assert r1 == r2 + + +class TestTimeDeltaConverter: + """Test timedelta converter""" + + @pytest.mark.parametrize( + "x, decimal, format_expected", + [ + (0.0, 0, "00:00:00"), + (3972320000000, 1, "01:06:12.3"), + (713233432000000, 2, "8 days 06:07:13.43"), + (32423432000000, 4, "09:00:23.4320"), + ], + ) + def test_format_timedelta_ticks(self, x, decimal, format_expected): + tdc = converter.TimeSeries_TimedeltaFormatter + result = tdc.format_timedelta_ticks(x, pos=None, n_decimals=decimal) + assert result == format_expected + + @pytest.mark.parametrize("view_interval", [(1, 2), (2, 1)]) + def test_call_w_different_view_intervals(self, view_interval, monkeypatch): + # previously broke on reversed xlmits; see GH37454 + class mock_axis: + def get_view_interval(self): + return view_interval + + tdc = converter.TimeSeries_TimedeltaFormatter() + monkeypatch.setattr(tdc, "axis", mock_axis()) + tdc(0.0, 0) + + +@pytest.mark.parametrize("year_span", [11.25, 30, 80, 150, 400, 800, 1500, 2500, 3500]) +# The range is limited to 11.25 at the bottom by if statements in +# the _quarterly_finder() function +def test_quarterly_finder(year_span): + vmin = -1000 + vmax = vmin + year_span * 4 + span = vmax - vmin + 1 + if span < 45: + pytest.skip("the quarterly finder is only invoked if the span is >= 45") + nyears = span / 4 + (min_anndef, maj_anndef) = converter._get_default_annual_spacing(nyears) + result = converter._quarterly_finder(vmin, vmax, to_offset("QE")) + quarters = PeriodIndex( + arrays.PeriodArray(np.array([x[0] for x in result]), dtype="period[Q]") + ) + majors = np.array([x[1] for x 
in result]) + minors = np.array([x[2] for x in result]) + major_quarters = quarters[majors] + minor_quarters = quarters[minors] + check_major_years = major_quarters.year % maj_anndef == 0 + check_minor_years = minor_quarters.year % min_anndef == 0 + check_major_quarters = major_quarters.quarter == 1 + check_minor_quarters = minor_quarters.quarter == 1 + assert np.all(check_major_years) + assert np.all(check_minor_years) + assert np.all(check_major_quarters) + assert np.all(check_minor_quarters) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_datetimelike.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_datetimelike.py new file mode 100644 index 0000000000000000000000000000000000000000..112172656b6ecde6a94282f58a8085751085fc44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_datetimelike.py @@ -0,0 +1,1754 @@ +""" Test cases for time series specific (freq conversion, etc) """ +from datetime import ( + date, + datetime, + time, + timedelta, +) +import pickle + +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + BaseOffset, + to_offset, +) +from pandas._libs.tslibs.dtypes import freq_to_period_freqstr + +from pandas import ( + DataFrame, + Index, + NaT, + Series, + concat, + isna, + to_datetime, +) +import pandas._testing as tm +from pandas.core.indexes.datetimes import ( + DatetimeIndex, + bdate_range, + date_range, +) +from pandas.core.indexes.period import ( + Period, + PeriodIndex, + period_range, +) +from pandas.core.indexes.timedeltas import timedelta_range +from pandas.tests.plotting.common import _check_ticks_props + +from pandas.tseries.offsets import WeekOfMonth + +mpl = pytest.importorskip("matplotlib") + + +class TestTSPlot: + @pytest.mark.filterwarnings("ignore::UserWarning") + def test_ts_plot_with_tz(self, tz_aware_fixture): + # GH2877, GH17173, GH31205, GH31580 + tz = tz_aware_fixture + index = date_range("1/1/2011", periods=2, freq="h", tz=tz) + ts = Series([188.5, 328.25], index=index) + _check_plot_works(ts.plot) + ax = ts.plot() + xdata = next(iter(ax.get_lines())).get_xdata() + # Check first and last points' labels are correct + assert (xdata[0].hour, xdata[0].minute) == (0, 0) + assert (xdata[-1].hour, xdata[-1].minute) == (1, 0) + + def test_fontsize_set_correctly(self): + # For issue #8765 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 9)), index=range(10) + ) + _, ax = mpl.pyplot.subplots() + df.plot(fontsize=2, ax=ax) + for label in ax.get_xticklabels() + ax.get_yticklabels(): + assert label.get_fontsize() == 2 + + def test_frame_inferred(self): + # inferred freq + idx = date_range("1/1/1987", freq="MS", periods=100) + idx = DatetimeIndex(idx.values, freq=None) + + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + _check_plot_works(df.plot) + + # axes freq + idx = idx[0:40].union(idx[45:99]) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + _check_plot_works(df2.plot) + + def test_frame_inferred_n_gt_1(self): + # N > 1 + idx = date_range("2008-1-1 00:15:00", freq="15min", periods=10) + idx = DatetimeIndex(idx.values, freq=None) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + _check_plot_works(df.plot) + + def test_is_error_nozeroindex(self): + # GH11858 + i = np.array([1, 2, 3]) + a = DataFrame(i, index=i) + _check_plot_works(a.plot, xerr=a) + _check_plot_works(a.plot, yerr=a) + + def test_nonnumeric_exclude(self): + idx = 
date_range("1/1/1987", freq="YE", periods=3) + df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx) + + fig, ax = mpl.pyplot.subplots() + df.plot(ax=ax) # it works + assert len(ax.get_lines()) == 1 # B was plotted + mpl.pyplot.close(fig) + + def test_nonnumeric_exclude_error(self): + idx = date_range("1/1/1987", freq="YE", periods=3) + df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx) + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df["A"].plot() + + @pytest.mark.parametrize("freq", ["s", "min", "h", "D", "W", "M", "Q", "Y"]) + def test_tsplot_period(self, freq): + idx = period_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _, ax = mpl.pyplot.subplots() + _check_plot_works(ser.plot, ax=ax) + + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_tsplot_datetime(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _, ax = mpl.pyplot.subplots() + _check_plot_works(ser.plot, ax=ax) + + def test_tsplot(self): + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + _, ax = mpl.pyplot.subplots() + ts.plot(style="k", ax=ax) + color = (0.0, 0.0, 0.0, 1) + assert color == ax.get_lines()[0].get_color() + + def test_both_style_and_color(self): + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + msg = ( + "Cannot pass 'style' string with a color symbol and 'color' " + "keyword argument. Please use one or the other or pass 'style' " + "without a color symbol" + ) + with pytest.raises(ValueError, match=msg): + ts.plot(style="b-", color="#000099") + + s = ts.reset_index(drop=True) + with pytest.raises(ValueError, match=msg): + s.plot(style="b-", color="#000099") + + @pytest.mark.parametrize("freq", ["ms", "us"]) + def test_high_freq(self, freq): + _, ax = mpl.pyplot.subplots() + rng = date_range("1/1/2012", periods=100, freq=freq) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _check_plot_works(ser.plot, ax=ax) + + def test_get_datevalue(self): + from pandas.plotting._matplotlib.converter import get_datevalue + + assert get_datevalue(None, "D") is None + assert get_datevalue(1987, "Y") == 1987 + assert get_datevalue(Period(1987, "Y"), "M") == Period("1987-12", "M").ordinal + assert get_datevalue("1/1/1987", "D") == Period("1987-1-1", "D").ordinal + + def test_ts_plot_format_coord(self): + def check_format_of_first_point(ax, expected_string): + first_line = ax.get_lines()[0] + first_x = first_line.get_xdata()[0].ordinal + first_y = first_line.get_ydata()[0] + assert expected_string == ax.format_coord(first_x, first_y) + + annual = Series(1, index=date_range("2014-01-01", periods=3, freq="YE-DEC")) + _, ax = mpl.pyplot.subplots() + annual.plot(ax=ax) + check_format_of_first_point(ax, "t = 2014 y = 1.000000") + + # note this is added to the annual plot already in existence, and + # changes its freq field + daily = Series(1, index=date_range("2014-01-01", periods=3, freq="D")) + daily.plot(ax=ax) + check_format_of_first_point(ax, "t = 2014-01-01 y = 1.000000") + + @pytest.mark.parametrize("freq", ["s", "min", "h", "D", "W", "M", "Q", "Y"]) + def test_line_plot_period_series(self, freq): + idx = period_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + 
_check_plot_works(ser.plot, ser.index.freq) + + @pytest.mark.parametrize( + "frqncy", ["1s", "3s", "5min", "7h", "4D", "8W", "11M", "3Y"] + ) + def test_line_plot_period_mlt_series(self, frqncy): + # test period index line plot for series with multiples (`mlt`) of the + # frequency (`frqncy`) rule code. tests resolution of issue #14763 + idx = period_range("12/31/1999", freq=frqncy, periods=100) + s = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _check_plot_works(s.plot, s.index.freq.rule_code) + + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_line_plot_datetime_series(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _check_plot_works(ser.plot, ser.index.freq.rule_code) + + @pytest.mark.parametrize("freq", ["s", "min", "h", "D", "W", "ME", "QE", "YE"]) + def test_line_plot_period_frame(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + index=idx, + columns=["A", "B", "C"], + ) + _check_plot_works(df.plot, df.index.freq) + + @pytest.mark.parametrize( + "frqncy", ["1s", "3s", "5min", "7h", "4D", "8W", "11M", "3Y"] + ) + def test_line_plot_period_mlt_frame(self, frqncy): + # test period index line plot for DataFrames with multiples (`mlt`) + # of the frequency (`frqncy`) rule code. tests resolution of issue + # #14763 + idx = period_range("12/31/1999", freq=frqncy, periods=100) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + index=idx, + columns=["A", "B", "C"], + ) + freq = freq_to_period_freqstr(1, df.index.freq.rule_code) + freq = df.index.asfreq(freq).freq + _check_plot_works(df.plot, freq) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_line_plot_datetime_frame(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + index=idx, + columns=["A", "B", "C"], + ) + freq = freq_to_period_freqstr(1, df.index.freq.rule_code) + freq = df.index.to_period(freq).freq + _check_plot_works(df.plot, freq) + + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_line_plot_inferred_freq(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + ser = Series(ser.values, Index(np.asarray(ser.index))) + _check_plot_works(ser.plot, ser.index.inferred_freq) + + ser = ser.iloc[[0, 3, 5, 6]] + _check_plot_works(ser.plot) + + def test_fake_inferred_business(self): + _, ax = mpl.pyplot.subplots() + rng = date_range("2001-1-1", "2001-1-10") + ts = Series(range(len(rng)), index=rng) + ts = concat([ts[:3], ts[5:]]) + ts.plot(ax=ax) + assert not hasattr(ax, "freq") + + def test_plot_offset_freq(self): + ser = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + _check_plot_works(ser.plot) + + def test_plot_offset_freq_business(self): + dr = date_range("2023-01-01", freq="BQS", periods=10) + ser = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr) + _check_plot_works(ser.plot) + + def test_plot_multiple_inferred_freq(self): + dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 
6), datetime(2000, 1, 11)]) + ser = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr) + _check_plot_works(ser.plot) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_uhf(self): + import pandas.plotting._matplotlib.converter as conv + + idx = date_range("2012-6-22 21:59:51.960928", freq="ms", periods=500) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 2)), index=idx + ) + + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + axis = ax.get_xaxis() + + tlocs = axis.get_ticklocs() + tlabels = axis.get_ticklabels() + for loc, label in zip(tlocs, tlabels): + xp = conv._from_ordinal(loc).strftime("%H:%M:%S.%f") + rs = str(label.get_text()) + if len(rs): + assert xp == rs + + def test_irreg_hf(self): + idx = date_range("2012-6-22 21:59:51", freq="s", periods=10) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 2)), index=idx + ) + + irreg = df.iloc[[0, 1, 3, 4]] + _, ax = mpl.pyplot.subplots() + irreg.plot(ax=ax) + diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff() + + sec = 1.0 / 24 / 60 / 60 + assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all() + + def test_irreg_hf_object(self): + idx = date_range("2012-6-22 21:59:51", freq="s", periods=10) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 2)), index=idx + ) + _, ax = mpl.pyplot.subplots() + df2.index = df2.index.astype(object) + df2.plot(ax=ax) + diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff() + sec = 1.0 / 24 / 60 / 60 + assert (np.fabs(diffs[1:] - sec) < 1e-8).all() + + def test_irregular_datetime64_repr_bug(self): + ser = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + ser = ser.iloc[[0, 1, 2, 7]] + + _, ax = mpl.pyplot.subplots() + + ret = ser.plot(ax=ax) + assert ret is not None + + for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index): + assert rs == xp + + def test_business_freq(self): + bts = Series(range(5), period_range("2020-01-01", periods=5)) + msg = r"PeriodDtype\[B\] is deprecated" + dt = bts.index[0].to_timestamp() + with tm.assert_produces_warning(FutureWarning, match=msg): + bts.index = period_range(start=dt, periods=len(bts), freq="B") + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal + idx = ax.get_lines()[0].get_xdata() + with tm.assert_produces_warning(FutureWarning, match=msg): + assert PeriodIndex(data=idx).freqstr == "B" + + def test_business_freq_convert(self): + bts = Series( + np.arange(300, dtype=np.float64), + index=date_range("2020-01-01", periods=300, freq="B"), + ).asfreq("BME") + ts = bts.to_period("M") + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal + idx = ax.get_lines()[0].get_xdata() + assert PeriodIndex(data=idx).freqstr == "M" + + def test_freq_with_no_period_alias(self): + # GH34487 + freq = WeekOfMonth() + bts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ).asfreq(freq) + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + + idx = ax.get_lines()[0].get_xdata() + msg = "freq not specified and cannot be inferred" + with pytest.raises(ValueError, match=msg): + PeriodIndex(data=idx) + + def test_nonzero_base(self): + # GH2571 + idx = date_range("2012-12-20", periods=24, freq="h") + timedelta(minutes=30) + df = DataFrame(np.arange(24), index=idx) + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + rs = ax.get_lines()[0].get_xdata() + assert not 
Index(rs).is_normalized + + def test_dataframe(self): + bts = DataFrame( + { + "a": Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ) + } + ) + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + idx = ax.get_lines()[0].get_xdata() + tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx)) + + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + @pytest.mark.parametrize( + "obj", + [ + Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ), + DataFrame( + { + "a": Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ), + "b": Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ) + + 1, + } + ), + ], + ) + def test_axis_limits(self, obj): + _, ax = mpl.pyplot.subplots() + obj.plot(ax=ax) + xlim = ax.get_xlim() + ax.set_xlim(xlim[0] - 5, xlim[1] + 10) + result = ax.get_xlim() + assert result[0] == xlim[0] - 5 + assert result[1] == xlim[1] + 10 + + # string + expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq)) + ax.set_xlim("1/1/2000", "4/1/2000") + result = ax.get_xlim() + assert int(result[0]) == expected[0].ordinal + assert int(result[1]) == expected[1].ordinal + + # datetime + expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq)) + ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1)) + result = ax.get_xlim() + assert int(result[0]) == expected[0].ordinal + assert int(result[1]) == expected[1].ordinal + fig = ax.get_figure() + mpl.pyplot.close(fig) + + def test_get_finder(self): + import pandas.plotting._matplotlib.converter as conv + + assert conv.get_finder(to_offset("B")) == conv._daily_finder + assert conv.get_finder(to_offset("D")) == conv._daily_finder + assert conv.get_finder(to_offset("ME")) == conv._monthly_finder + assert conv.get_finder(to_offset("QE")) == conv._quarterly_finder + assert conv.get_finder(to_offset("YE")) == conv._annual_finder + assert conv.get_finder(to_offset("W")) == conv._daily_finder + + def test_finder_daily(self): + day_lst = [10, 40, 252, 400, 950, 2750, 10000] + + msg = "Period with BDay freq is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + xpl1 = xpl2 = [Period("1999-1-1", freq="B").ordinal] * len(day_lst) + rs1 = [] + rs2 = [] + for n in day_lst: + rng = bdate_range("1999-1-1", periods=n) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs1.append(xaxis.get_majorticklocs()[0]) + + vmin, vmax = ax.get_xlim() + ax.set_xlim(vmin + 0.9, vmax) + rs2.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs1 == xpl1 + assert rs2 == xpl2 + + def test_finder_quarterly(self): + yrs = [3.5, 11] + + xpl1 = xpl2 = [Period("1988Q1").ordinal] * len(yrs) + rs1 = [] + rs2 = [] + for n in yrs: + rng = period_range("1987Q2", periods=int(n * 4), freq="Q") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs1.append(xaxis.get_majorticklocs()[0]) + + (vmin, vmax) = ax.get_xlim() + ax.set_xlim(vmin + 0.9, vmax) + rs2.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs1 == xpl1 + assert rs2 == xpl2 + + def test_finder_monthly(self): + yrs = [1.15, 2.5, 4, 11] + + xpl1 = xpl2 = [Period("Jan 1988").ordinal] * len(yrs) + rs1 = [] + rs2 = [] + for n in 
yrs: + rng = period_range("1987Q2", periods=int(n * 12), freq="M") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs1.append(xaxis.get_majorticklocs()[0]) + + vmin, vmax = ax.get_xlim() + ax.set_xlim(vmin + 0.9, vmax) + rs2.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs1 == xpl1 + assert rs2 == xpl2 + + def test_finder_monthly_long(self): + rng = period_range("1988Q1", periods=24 * 12, freq="M") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs = xaxis.get_majorticklocs()[0] + xp = Period("1989Q1", "M").ordinal + assert rs == xp + + def test_finder_annual(self): + xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] + xp = [Period(x, freq="Y").ordinal for x in xp] + rs = [] + for nyears in [5, 10, 19, 49, 99, 199, 599, 1001]: + rng = period_range("1987", periods=nyears, freq="Y") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs == xp + + @pytest.mark.slow + def test_finder_minutely(self): + nminutes = 50 * 24 * 60 + rng = date_range("1/1/1999", freq="Min", periods=nminutes) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs = xaxis.get_majorticklocs()[0] + xp = Period("1/1/1999", freq="Min").ordinal + + assert rs == xp + + def test_finder_hourly(self): + nhours = 23 + rng = date_range("1/1/1999", freq="h", periods=nhours) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs = xaxis.get_majorticklocs()[0] + xp = Period("1/1/1999", freq="h").ordinal + + assert rs == xp + + def test_gaps(self): + ts = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + ts.iloc[5:25] = np.nan + _, ax = mpl.pyplot.subplots() + ts.plot(ax=ax) + lines = ax.get_lines() + assert len(lines) == 1 + line = lines[0] + data = line.get_xydata() + + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[5:25, 1].all() + mpl.pyplot.close(ax.get_figure()) + + def test_gaps_irregular(self): + # irregular + ts = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + ts = ts.iloc[[0, 1, 2, 5, 7, 9, 12, 15, 20]] + ts.iloc[2:5] = np.nan + _, ax = mpl.pyplot.subplots() + ax = ts.plot(ax=ax) + lines = ax.get_lines() + assert len(lines) == 1 + line = lines[0] + data = line.get_xydata() + + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[2:5, 1].all() + mpl.pyplot.close(ax.get_figure()) + + def test_gaps_non_ts(self): + # non-ts + idx = [0, 1, 2, 5, 7, 9, 12, 15, 20] + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + ser.iloc[2:5] = np.nan + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + lines = ax.get_lines() + assert len(lines) == 1 + line = lines[0] + data = line.get_xydata() + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = 
data.mask + assert mask[2:5, 1].all() + + def test_gap_upsample(self): + low = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + low.iloc[5:25] = np.nan + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + + idxh = date_range(low.index[0], low.index[-1], freq="12h") + s = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + s.plot(secondary_y=True) + lines = ax.get_lines() + assert len(lines) == 1 + assert len(ax.right_ax.get_lines()) == 1 + + line = lines[0] + data = line.get_xydata() + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[5:25, 1].all() + + def test_secondary_y(self): + ser = Series(np.random.default_rng(2).standard_normal(10)) + fig, _ = mpl.pyplot.subplots() + ax = ser.plot(secondary_y=True) + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + axes = fig.get_axes() + line = ax.get_lines()[0] + xp = Series(line.get_ydata(), line.get_xdata()) + tm.assert_series_equal(ser, xp) + assert ax.get_yaxis().get_ticks_position() == "right" + assert not axes[0].get_yaxis().get_visible() + mpl.pyplot.close(fig) + + def test_secondary_y_yaxis(self): + Series(np.random.default_rng(2).standard_normal(10)) + ser2 = Series(np.random.default_rng(2).standard_normal(10)) + _, ax2 = mpl.pyplot.subplots() + ser2.plot(ax=ax2) + assert ax2.get_yaxis().get_ticks_position() == "left" + mpl.pyplot.close(ax2.get_figure()) + + def test_secondary_both(self): + ser = Series(np.random.default_rng(2).standard_normal(10)) + ser2 = Series(np.random.default_rng(2).standard_normal(10)) + ax = ser2.plot() + ax2 = ser.plot(secondary_y=True) + assert ax.get_yaxis().get_visible() + assert not hasattr(ax, "left_ax") + assert hasattr(ax, "right_ax") + assert hasattr(ax2, "left_ax") + assert not hasattr(ax2, "right_ax") + + def test_secondary_y_ts(self): + idx = date_range("1/1/2000", periods=10) + ser = Series(np.random.default_rng(2).standard_normal(10), idx) + fig, _ = mpl.pyplot.subplots() + ax = ser.plot(secondary_y=True) + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + axes = fig.get_axes() + line = ax.get_lines()[0] + xp = Series(line.get_ydata(), line.get_xdata()).to_timestamp() + tm.assert_series_equal(ser, xp) + assert ax.get_yaxis().get_ticks_position() == "right" + assert not axes[0].get_yaxis().get_visible() + mpl.pyplot.close(fig) + + def test_secondary_y_ts_yaxis(self): + idx = date_range("1/1/2000", periods=10) + ser2 = Series(np.random.default_rng(2).standard_normal(10), idx) + _, ax2 = mpl.pyplot.subplots() + ser2.plot(ax=ax2) + assert ax2.get_yaxis().get_ticks_position() == "left" + mpl.pyplot.close(ax2.get_figure()) + + def test_secondary_y_ts_visible(self): + idx = date_range("1/1/2000", periods=10) + ser2 = Series(np.random.default_rng(2).standard_normal(10), idx) + ax = ser2.plot() + assert ax.get_yaxis().get_visible() + + def test_secondary_kde(self): + pytest.importorskip("scipy") + ser = Series(np.random.default_rng(2).standard_normal(10)) + fig, ax = mpl.pyplot.subplots() + ax = ser.plot(secondary_y=True, kind="density", ax=ax) + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + axes = fig.get_axes() + assert axes[1].get_yaxis().get_ticks_position() == "right" + + def test_secondary_bar(self): + ser = Series(np.random.default_rng(2).standard_normal(10)) + fig, ax = mpl.pyplot.subplots() + ser.plot(secondary_y=True, kind="bar", ax=ax) + axes = fig.get_axes() + assert 
axes[1].get_yaxis().get_ticks_position() == "right" + + def test_secondary_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=["a", "b", "c"] + ) + axes = df.plot(secondary_y=["a", "c"], subplots=True) + assert axes[0].get_yaxis().get_ticks_position() == "right" + assert axes[1].get_yaxis().get_ticks_position() == "left" + assert axes[2].get_yaxis().get_ticks_position() == "right" + + def test_secondary_bar_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=["a", "b", "c"] + ) + axes = df.plot(kind="bar", secondary_y=["a", "c"], subplots=True) + assert axes[0].get_yaxis().get_ticks_position() == "right" + assert axes[1].get_yaxis().get_ticks_position() == "left" + assert axes[2].get_yaxis().get_ticks_position() == "right" + + def test_mixed_freq_regular_first(self): + # TODO + s1 = Series( + np.arange(20, dtype=np.float64), + index=date_range("2020-01-01", periods=20, freq="B"), + ) + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15]] + + # it works! + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + + ax2 = s2.plot(style="g", ax=ax) + lines = ax2.get_lines() + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + idx1 = PeriodIndex(lines[0].get_xdata()) + idx2 = PeriodIndex(lines[1].get_xdata()) + + tm.assert_index_equal(idx1, s1.index.to_period("B")) + tm.assert_index_equal(idx2, s2.index.to_period("B")) + + left, right = ax2.get_xlim() + pidx = s1.index.to_period() + assert left <= pidx[0].ordinal + assert right >= pidx[-1].ordinal + + def test_mixed_freq_irregular_first(self): + s1 = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15]] + _, ax = mpl.pyplot.subplots() + s2.plot(style="g", ax=ax) + s1.plot(ax=ax) + assert not hasattr(ax, "freq") + lines = ax.get_lines() + x1 = lines[0].get_xdata() + tm.assert_numpy_array_equal(x1, s2.index.astype(object).values) + x2 = lines[1].get_xdata() + tm.assert_numpy_array_equal(x2, s1.index.astype(object).values) + + def test_mixed_freq_regular_first_df(self): + # GH 9852 + s1 = Series( + np.arange(20, dtype=np.float64), + index=date_range("2020-01-01", periods=20, freq="B"), + ).to_frame() + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + ax2 = s2.plot(style="g", ax=ax) + lines = ax2.get_lines() + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + idx1 = PeriodIndex(lines[0].get_xdata()) + idx2 = PeriodIndex(lines[1].get_xdata()) + assert idx1.equals(s1.index.to_period("B")) + assert idx2.equals(s2.index.to_period("B")) + left, right = ax2.get_xlim() + pidx = s1.index.to_period() + assert left <= pidx[0].ordinal + assert right >= pidx[-1].ordinal + + def test_mixed_freq_irregular_first_df(self): + # GH 9852 + s1 = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ).to_frame() + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] + _, ax = mpl.pyplot.subplots() + s2.plot(style="g", ax=ax) + s1.plot(ax=ax) + assert not hasattr(ax, "freq") + lines = ax.get_lines() + x1 = lines[0].get_xdata() + tm.assert_numpy_array_equal(x1, s2.index.astype(object).values) + x2 = lines[1].get_xdata() + tm.assert_numpy_array_equal(x2, s1.index.astype(object).values) + + def test_mixed_freq_hf_first(self): + idxh = date_range("1/1/1999", periods=365, freq="D") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = 
Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + low.plot(ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "D" + + def test_mixed_freq_alignment(self): + ts_ind = date_range("2012-01-01 13:00", "2012-01-02", freq="h") + ts_data = np.random.default_rng(2).standard_normal(12) + + ts = Series(ts_data, index=ts_ind) + ts2 = ts.asfreq("min").interpolate() + + _, ax = mpl.pyplot.subplots() + ax = ts.plot(ax=ax) + ts2.plot(style="r", ax=ax) + + assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0] + + def test_mixed_freq_lf_first(self): + idxh = date_range("1/1/1999", periods=365, freq="D") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(legend=True, ax=ax) + high.plot(legend=True, ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "D" + leg = ax.get_legend() + assert len(leg.texts) == 2 + mpl.pyplot.close(ax.get_figure()) + + def test_mixed_freq_lf_first_hourly(self): + idxh = date_range("1/1/1999", periods=240, freq="min") + idxl = date_range("1/1/1999", periods=4, freq="h") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + high.plot(ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "min" + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_mixed_freq_irreg_period(self): + ts = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + irreg = ts.iloc[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]] + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + rng = period_range("1/3/2000", periods=30, freq="B") + ps = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + irreg.plot(ax=ax) + ps.plot(ax=ax) + + def test_mixed_freq_shared_ax(self): + # GH13341, using sharex=True + idx1 = date_range("2015-01-01", periods=3, freq="ME") + idx2 = idx1[:1].union(idx1[2:]) + s1 = Series(range(len(idx1)), idx1) + s2 = Series(range(len(idx2)), idx2) + + _, (ax1, ax2) = mpl.pyplot.subplots(nrows=2, sharex=True) + s1.plot(ax=ax1) + s2.plot(ax=ax2) + + assert ax1.freq == "M" + assert ax2.freq == "M" + assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] + + def test_mixed_freq_shared_ax_twin_x(self): + # GH13341, using sharex=True + idx1 = date_range("2015-01-01", periods=3, freq="ME") + idx2 = idx1[:1].union(idx1[2:]) + s1 = Series(range(len(idx1)), idx1) + s2 = Series(range(len(idx2)), idx2) + # using twinx + _, ax1 = mpl.pyplot.subplots() + ax2 = ax1.twinx() + s1.plot(ax=ax1) + s2.plot(ax=ax2) + + assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] + + @pytest.mark.xfail(reason="TODO (GH14330, GH14322)") + def test_mixed_freq_shared_ax_twin_x_irregular_first(self): + # GH13341, using sharex=True + idx1 = date_range("2015-01-01", periods=3, freq="M") + idx2 = idx1[:1].union(idx1[2:]) + s1 = Series(range(len(idx1)), idx1) + s2 = Series(range(len(idx2)), idx2) + _, ax1 = 
mpl.pyplot.subplots() + ax2 = ax1.twinx() + s2.plot(ax=ax1) + s1.plot(ax=ax2) + assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] + + def test_nat_handling(self): + _, ax = mpl.pyplot.subplots() + + dti = DatetimeIndex(["2015-01-01", NaT, "2015-01-03"]) + s = Series(range(len(dti)), dti) + s.plot(ax=ax) + xdata = ax.get_lines()[0].get_xdata() + # plot x data is bounded by index values + assert s.index.min() <= Series(xdata).min() + assert Series(xdata).max() <= s.index.max() + + def test_to_weekly_resampling_disallow_how_kwd(self): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + + msg = ( + "'how' is not a valid keyword for plotting functions. If plotting " + "multiple objects on shared axes, resample manually first." + ) + with pytest.raises(ValueError, match=msg): + low.plot(ax=ax, how="foo") + + def test_to_weekly_resampling(self): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + low.plot(ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + + def test_from_weekly_resampling(self): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + high.plot(ax=ax) + + expected_h = idxh.to_period().asi8.astype(np.float64) + expected_l = np.array( + [1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, 1553, 1558, 1562], + dtype=np.float64, + ) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + xdata = line.get_xdata(orig=False) + if len(xdata) == 12: # idxl lines + tm.assert_numpy_array_equal(xdata, expected_l) + else: + tm.assert_numpy_array_equal(xdata, expected_h) + + @pytest.mark.parametrize("kind1, kind2", [("line", "area"), ("area", "line")]) + def test_from_resampling_area_line_mixed(self, kind1, kind2): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = DataFrame( + np.random.default_rng(2).random((len(idxh), 3)), + index=idxh, + columns=[0, 1, 2], + ) + low = DataFrame( + np.random.default_rng(2).random((len(idxl), 3)), + index=idxl, + columns=[0, 1, 2], + ) + + _, ax = mpl.pyplot.subplots() + low.plot(kind=kind1, stacked=True, ax=ax) + high.plot(kind=kind2, stacked=True, ax=ax) + + # check low dataframe result + expected_x = np.array( + [ + 1514, + 1519, + 1523, + 1527, + 1531, + 1536, + 1540, + 1544, + 1549, + 1553, + 1558, + 1562, + ], + dtype=np.float64, + ) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + line = ax.lines[i] + assert PeriodIndex(line.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) + # check stacked values are correct + expected_y += low[i].values + tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) + + # check high dataframe 
result + expected_x = idxh.to_period().asi8.astype(np.float64) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + line = ax.lines[3 + i] + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) + expected_y += high[i].values + tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) + + @pytest.mark.parametrize("kind1, kind2", [("line", "area"), ("area", "line")]) + def test_from_resampling_area_line_mixed_high_to_low(self, kind1, kind2): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = DataFrame( + np.random.default_rng(2).random((len(idxh), 3)), + index=idxh, + columns=[0, 1, 2], + ) + low = DataFrame( + np.random.default_rng(2).random((len(idxl), 3)), + index=idxl, + columns=[0, 1, 2], + ) + _, ax = mpl.pyplot.subplots() + high.plot(kind=kind1, stacked=True, ax=ax) + low.plot(kind=kind2, stacked=True, ax=ax) + + # check high dataframe result + expected_x = idxh.to_period().asi8.astype(np.float64) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + line = ax.lines[i] + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) + expected_y += high[i].values + tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) + + # check low dataframe result + expected_x = np.array( + [ + 1514, + 1519, + 1523, + 1527, + 1531, + 1536, + 1540, + 1544, + 1549, + 1553, + 1558, + 1562, + ], + dtype=np.float64, + ) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + lines = ax.lines[3 + i] + assert PeriodIndex(data=lines.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(lines.get_xdata(orig=False), expected_x) + expected_y += low[i].values + tm.assert_numpy_array_equal(lines.get_ydata(orig=False), expected_y) + + def test_mixed_freq_second_millisecond(self): + # GH 7772, GH 7760 + idxh = date_range("2014-07-01 09:00", freq="s", periods=50) + idxl = date_range("2014-07-01 09:00", freq="100ms", periods=500) + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + # high to low + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + low.plot(ax=ax) + assert len(ax.get_lines()) == 2 + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "ms" + + def test_mixed_freq_second_millisecond_low_to_high(self): + # GH 7772, GH 7760 + idxh = date_range("2014-07-01 09:00", freq="s", periods=50) + idxl = date_range("2014-07-01 09:00", freq="100ms", periods=500) + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + # low to high + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + high.plot(ax=ax) + assert len(ax.get_lines()) == 2 + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "ms" + + def test_irreg_dtypes(self): + # date + idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)] + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + Index(idx, dtype=object), + ) + _check_plot_works(df.plot) + + def test_irreg_dtypes_dt64(self): + # np.datetime64 + idx = date_range("1/1/2000", periods=10) + idx = idx[[0, 2, 5, 9]].astype(object) + df = DataFrame(np.random.default_rng(2).standard_normal((len(idx), 3)), idx) + _, ax 
= mpl.pyplot.subplots() + _check_plot_works(df.plot, ax=ax) + + def test_time(self): + t = datetime(1, 1, 1, 3, 30, 0) + deltas = np.random.default_rng(2).integers(1, 20, 3).cumsum() + ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas]) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(len(ts)), + "b": np.random.default_rng(2).standard_normal(len(ts)), + }, + index=ts, + ) + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + + # verify tick labels + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if s != 0: + xp = time(h, m, s).strftime("%H:%M:%S") + else: + xp = time(h, m, s).strftime("%H:%M") + assert xp == rs + + def test_time_change_xlim(self): + t = datetime(1, 1, 1, 3, 30, 0) + deltas = np.random.default_rng(2).integers(1, 20, 3).cumsum() + ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas]) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(len(ts)), + "b": np.random.default_rng(2).standard_normal(len(ts)), + }, + index=ts, + ) + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + + # verify tick labels + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if s != 0: + xp = time(h, m, s).strftime("%H:%M:%S") + else: + xp = time(h, m, s).strftime("%H:%M") + assert xp == rs + + # change xlim + ax.set_xlim("1:30", "5:00") + + # check tick labels again + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if s != 0: + xp = time(h, m, s).strftime("%H:%M:%S") + else: + xp = time(h, m, s).strftime("%H:%M") + assert xp == rs + + def test_time_musec(self): + t = datetime(1, 1, 1, 3, 30, 0) + deltas = np.random.default_rng(2).integers(1, 20, 3).cumsum() + ts = np.array([(t + timedelta(microseconds=int(x))).time() for x in deltas]) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(len(ts)), + "b": np.random.default_rng(2).standard_normal(len(ts)), + }, + index=ts, + ) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + + # verify tick labels + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + + us = round((_tick - int(_tick)) * 1e6) + + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if (us % 1000) != 0: + xp = time(h, m, s, us).strftime("%H:%M:%S.%f") + elif (us // 1000) != 0: + xp = time(h, m, s, us).strftime("%H:%M:%S.%f")[:-3] + elif s != 0: + xp = time(h, m, s, us).strftime("%H:%M:%S") + else: + xp = time(h, m, s, us).strftime("%H:%M") + assert xp == rs + + def test_secondary_upsample(self): + idxh = date_range("1/1/1999", periods=365, freq="D") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + ax = high.plot(secondary_y=True, ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(line.get_xdata()).freq == "D" + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + for line in ax.left_ax.get_lines(): + assert 
PeriodIndex(line.get_xdata()).freq == "D" + + def test_secondary_legend(self): + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + + # ts + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df.plot(secondary_y=["A", "B"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert leg.get_texts()[0].get_text() == "A (right)" + assert leg.get_texts()[1].get_text() == "B (right)" + assert leg.get_texts()[2].get_text() == "C" + assert leg.get_texts()[3].get_text() == "D" + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + mpl.pyplot.close(fig) + + def test_secondary_legend_right(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + df.plot(secondary_y=["A", "C"], mark_right=False, ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert leg.get_texts()[0].get_text() == "A" + assert leg.get_texts()[1].get_text() == "B" + assert leg.get_texts()[2].get_text() == "C" + assert leg.get_texts()[3].get_text() == "D" + mpl.pyplot.close(fig) + + def test_secondary_legend_bar(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig, ax = mpl.pyplot.subplots() + df.plot(kind="bar", secondary_y=["A"], ax=ax) + leg = ax.get_legend() + assert leg.get_texts()[0].get_text() == "A (right)" + assert leg.get_texts()[1].get_text() == "B" + mpl.pyplot.close(fig) + + def test_secondary_legend_bar_right(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig, ax = mpl.pyplot.subplots() + df.plot(kind="bar", secondary_y=["A"], mark_right=False, ax=ax) + leg = ax.get_legend() + assert leg.get_texts()[0].get_text() == "A" + assert leg.get_texts()[1].get_text() == "B" + mpl.pyplot.close(fig) + + def test_secondary_legend_multi_col(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + ax = df.plot(secondary_y=["C", "D"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + mpl.pyplot.close(fig) + + def test_secondary_legend_nonts(self): + # non-ts + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + ax = df.plot(secondary_y=["A", "B"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert ax.right_ax.get_legend() is None + colors = set() + for line in 
leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + mpl.pyplot.close() + + def test_secondary_legend_nonts_multi_col(self): + # non-ts + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + ax = df.plot(secondary_y=["C", "D"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_format_date_axis(self): + rng = date_range("1/1/2012", periods=12, freq="ME") + df = DataFrame(np.random.default_rng(2).standard_normal((len(rng), 3)), rng) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + xaxis = ax.get_xaxis() + for line in xaxis.get_ticklabels(): + if len(line.get_text()) > 0: + assert line.get_rotation() == 30 + + def test_ax_plot(self): + x = date_range(start="2012-01-02", periods=10, freq="D") + y = list(range(len(x))) + _, ax = mpl.pyplot.subplots() + lines = ax.plot(x, y, label="Y") + tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x) + + def test_mpl_nopandas(self): + dates = [date(2008, 12, 31), date(2009, 1, 31)] + values1 = np.arange(10.0, 11.0, 0.5) + values2 = np.arange(11.0, 12.0, 0.5) + + kw = {"fmt": "-", "lw": 4} + + _, ax = mpl.pyplot.subplots() + ax.plot_date([x.toordinal() for x in dates], values1, **kw) + ax.plot_date([x.toordinal() for x in dates], values2, **kw) + + line1, line2 = ax.get_lines() + + exp = np.array([x.toordinal() for x in dates], dtype=np.float64) + tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp) + exp = np.array([x.toordinal() for x in dates], dtype=np.float64) + tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp) + + def test_irregular_ts_shared_ax_xlim(self): + # GH 2960 + from pandas.plotting._matplotlib.converter import DatetimeConverter + + ts = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + ts_irregular = ts.iloc[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] + + # plot the left section of the irregular series, then the right section + _, ax = mpl.pyplot.subplots() + ts_irregular[:5].plot(ax=ax) + ts_irregular[5:].plot(ax=ax) + + # check that axis limits are correct + left, right = ax.get_xlim() + assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) + assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) + + def test_secondary_y_non_ts_xlim(self): + # GH 3490 - non-timeseries with secondary y + index_1 = [1, 2, 3, 4] + index_2 = [5, 6, 7, 8] + s1 = Series(1, index=index_1) + s2 = Series(2, index=index_2) + + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + left_before, right_before = ax.get_xlim() + s2.plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + assert left_before >= left_after + assert right_before < right_after + + def test_secondary_y_regular_ts_xlim(self): + # GH 3490 - regular-timeseries with secondary y + index_1 = date_range(start="2000-01-01", periods=4, freq="D") + index_2 = date_range(start="2000-01-05", periods=4, freq="D") + s1 = Series(1, index=index_1) + s2 = Series(2, index=index_2) + + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + left_before, right_before = ax.get_xlim() + s2.plot(secondary_y=True, 
ax=ax) + left_after, right_after = ax.get_xlim() + + assert left_before >= left_after + assert right_before < right_after + + def test_secondary_y_mixed_freq_ts_xlim(self): + # GH 3490 - mixed frequency timeseries with secondary y + rng = date_range("2000-01-01", periods=10000, freq="min") + ts = Series(1, index=rng) + + _, ax = mpl.pyplot.subplots() + ts.plot(ax=ax) + left_before, right_before = ax.get_xlim() + ts.resample("D").mean().plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + # a downsample should not have changed either limit + assert left_before == left_after + assert right_before == right_after + + def test_secondary_y_irregular_ts_xlim(self): + # GH 3490 - irregular-timeseries with secondary y + from pandas.plotting._matplotlib.converter import DatetimeConverter + + ts = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + ts_irregular = ts.iloc[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] + + _, ax = mpl.pyplot.subplots() + ts_irregular[:5].plot(ax=ax) + # plot higher-x values on secondary axis + ts_irregular[5:].plot(secondary_y=True, ax=ax) + # ensure secondary limits aren't overwritten by plot on primary + ts_irregular[:5].plot(ax=ax) + + left, right = ax.get_xlim() + assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) + assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) + + def test_plot_outofbounds_datetime(self): + # 2579 - checking this does not raise + values = [date(1677, 1, 1), date(1677, 1, 2)] + _, ax = mpl.pyplot.subplots() + ax.plot(values) + + values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)] + ax.plot(values) + + def test_format_timedelta_ticks_narrow(self): + expected_labels = [f"00:00:00.0000000{i:0>2d}" for i in np.arange(10)] + + rng = timedelta_range("0", periods=10, freq="ns") + df = DataFrame(np.random.default_rng(2).standard_normal((len(rng), 3)), rng) + _, ax = mpl.pyplot.subplots() + df.plot(fontsize=2, ax=ax) + mpl.pyplot.draw() + labels = ax.get_xticklabels() + + result_labels = [x.get_text() for x in labels] + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels + + def test_format_timedelta_ticks_wide(self): + expected_labels = [ + "00:00:00", + "1 days 03:46:40", + "2 days 07:33:20", + "3 days 11:20:00", + "4 days 15:06:40", + "5 days 18:53:20", + "6 days 22:40:00", + "8 days 02:26:40", + "9 days 06:13:20", + ] + + rng = timedelta_range("0", periods=10, freq="1 d") + df = DataFrame(np.random.default_rng(2).standard_normal((len(rng), 3)), rng) + _, ax = mpl.pyplot.subplots() + ax = df.plot(fontsize=2, ax=ax) + mpl.pyplot.draw() + labels = ax.get_xticklabels() + + result_labels = [x.get_text() for x in labels] + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels + + def test_timedelta_plot(self): + # test issue #8711 + s = Series(range(5), timedelta_range("1day", periods=5)) + _, ax = mpl.pyplot.subplots() + _check_plot_works(s.plot, ax=ax) + + def test_timedelta_long_period(self): + # test long period + index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 d") + s = Series(np.random.default_rng(2).standard_normal(len(index)), index) + _, ax = mpl.pyplot.subplots() + _check_plot_works(s.plot, ax=ax) + + def test_timedelta_short_period(self): + # test short period + index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 ns") + s = Series(np.random.default_rng(2).standard_normal(len(index)), index) + _, ax = 
mpl.pyplot.subplots() + _check_plot_works(s.plot, ax=ax) + + def test_hist(self): + # https://github.com/matplotlib/matplotlib/issues/8459 + rng = date_range("1/1/2011", periods=10, freq="h") + x = rng + w1 = np.arange(0, 1, 0.1) + w2 = np.arange(0, 1, 0.1)[::-1] + _, ax = mpl.pyplot.subplots() + ax.hist([x, x], weights=[w1, w2]) + + def test_overlapping_datetime(self): + # GB 6608 + s1 = Series( + [1, 2, 3], + index=[ + datetime(1995, 12, 31), + datetime(2000, 12, 31), + datetime(2005, 12, 31), + ], + ) + s2 = Series( + [1, 2, 3], + index=[ + datetime(1997, 12, 31), + datetime(2003, 12, 31), + datetime(2008, 12, 31), + ], + ) + + # plot first series, then add the second series to those axes, + # then try adding the first series again + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + s2.plot(ax=ax) + s1.plot(ax=ax) + + @pytest.mark.xfail(reason="GH9053 matplotlib does not use ax.xaxis.converter") + def test_add_matplotlib_datetime64(self): + # GH9053 - ensure that a plot with PeriodConverter still understands + # datetime64 data. This still fails because matplotlib overrides the + # ax.xaxis.converter with a DatetimeConverter + s = Series( + np.random.default_rng(2).standard_normal(10), + index=date_range("1970-01-02", periods=10), + ) + ax = s.plot() + with tm.assert_produces_warning(DeprecationWarning): + # multi-dimensional indexing + ax.plot(s.index, s.values, color="g") + l1, l2 = ax.lines + tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata()) + + def test_matplotlib_scatter_datetime64(self): + # https://github.com/matplotlib/matplotlib/issues/11391 + df = DataFrame(np.random.default_rng(2).random((10, 2)), columns=["x", "y"]) + df["time"] = date_range("2018-01-01", periods=10, freq="D") + _, ax = mpl.pyplot.subplots() + ax.scatter(x="time", y="y", data=df) + mpl.pyplot.draw() + label = ax.get_xticklabels()[0] + expected = "2018-01-01" + assert label.get_text() == expected + + def test_check_xticks_rot(self): + # https://github.com/pandas-dev/pandas/issues/29460 + # regular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-03"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + axes = df.plot(x="x", y="y") + _check_ticks_props(axes, xrot=0) + + def test_check_xticks_rot_irregular(self): + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + axes = df.plot(x="x", y="y") + _check_ticks_props(axes, xrot=30) + + def test_check_xticks_rot_use_idx(self): + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + # use timeseries index or not + axes = df.set_index("x").plot(y="y", use_index=True) + _check_ticks_props(axes, xrot=30) + axes = df.set_index("x").plot(y="y", use_index=False) + _check_ticks_props(axes, xrot=0) + + def test_check_xticks_rot_sharex(self): + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + # separate subplots + axes = df.plot(x="x", y="y", subplots=True, sharex=True) + _check_ticks_props(axes, xrot=30) + axes = df.plot(x="x", y="y", subplots=True, sharex=False) + _check_ticks_props(axes, xrot=0) + + +def _check_plot_works(f, freq=None, series=None, *args, **kwargs): + import matplotlib.pyplot as plt + + fig = plt.gcf() + + try: + plt.clf() + ax = fig.add_subplot(211) + orig_ax = kwargs.pop("ax", plt.gca()) + orig_axfreq = getattr(orig_ax, "freq", None) + + ret = f(*args, **kwargs) + assert ret is not None # do 
something more intelligent + + ax = kwargs.pop("ax", plt.gca()) + if series is not None: + dfreq = series.index.freq + if isinstance(dfreq, BaseOffset): + dfreq = dfreq.rule_code + if orig_axfreq is None: + assert ax.freq == dfreq + + if freq is not None: + ax_freq = to_offset(ax.freq, is_period=True) + if freq is not None and orig_axfreq is None: + assert ax_freq == freq + + ax = fig.add_subplot(212) + kwargs["ax"] = ax + ret = f(*args, **kwargs) + assert ret is not None # TODO: do something more intelligent + + # GH18439, GH#24088, statsmodels#4772 + with tm.ensure_clean(return_filelike=True) as path: + pickle.dump(fig, path) + finally: + plt.close(fig) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_groupby.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..5ebf93510a61549c838d91ab2e703f9db23fd626 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_groupby.py @@ -0,0 +1,155 @@ +""" Test cases for GroupBy.plot """ + + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_legend_labels, +) + +pytest.importorskip("matplotlib") + + +class TestDataFrameGroupByPlots: + def test_series_groupby_plotting_nominally_works(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + + weight.groupby(gender).plot() + + def test_series_groupby_plotting_nominally_works_hist(self): + n = 10 + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + height.groupby(gender).hist() + + def test_series_groupby_plotting_nominally_works_alpha(self): + n = 10 + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + # Regression test for GH8733 + height.groupby(gender).plot(alpha=0.5) + + def test_plotting_with_float_index_works(self): + # GH 7025 + df = DataFrame( + { + "def": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "val": np.random.default_rng(2).standard_normal(9), + }, + index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0], + ) + + df.groupby("def")["val"].plot() + + def test_plotting_with_float_index_works_apply(self): + # GH 7025 + df = DataFrame( + { + "def": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "val": np.random.default_rng(2).standard_normal(9), + }, + index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0], + ) + df.groupby("def")["val"].apply(lambda x: x.plot()) + + def test_hist_single_row(self): + # GH10214 + bins = np.arange(80, 100 + 2, 1) + df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]}) + df["Mark"].hist(by=df["ByCol"], bins=bins) + + def test_hist_single_row_single_bycol(self): + # GH10214 + bins = np.arange(80, 100 + 2, 1) + df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]}) + df["Mark"].hist(by=df["ByCol"], bins=bins) + + def test_plot_submethod_works(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + df.groupby("z").plot.scatter("x", "y") + + def test_plot_submethod_works_line(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + df.groupby("z")["x"].plot.line() + + def test_plot_kwargs(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + + 
res = df.groupby("z").plot(kind="scatter", x="x", y="y") + # check that a scatter plot is effectively plotted: the axes should + # contain a PathCollection from the scatter plot (GH11805) + assert len(res["a"].collections) == 1 + + def test_plot_kwargs_scatter(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + res = df.groupby("z").plot.scatter(x="x", y="y") + assert len(res["a"].collections) == 1 + + @pytest.mark.parametrize("column, expected_axes_num", [(None, 2), ("b", 1)]) + def test_groupby_hist_frame_with_legend(self, column, expected_axes_num): + # GH 6279 - DataFrameGroupBy histogram can have a legend + expected_layout = (1, expected_axes_num) + expected_labels = column or [["a"], ["b"]] + + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + for axes in g.hist(legend=True, column=column): + _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) + for ax, expected_label in zip(axes[0], expected_labels): + _check_legend_labels(ax, expected_label) + + @pytest.mark.parametrize("column", [None, "b"]) + def test_groupby_hist_frame_with_legend_raises(self, column): + # GH 6279 - DataFrameGroupBy histogram with legend and label raises + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + g.hist(legend=True, column=column, label="d") + + def test_groupby_hist_series_with_legend(self): + # GH 6279 - SeriesGroupBy histogram can have a legend + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + for ax in g["a"].hist(legend=True): + _check_axes_shape(ax, axes_num=1, layout=(1, 1)) + _check_legend_labels(ax, ["1", "2"]) + + def test_groupby_hist_series_with_legend_raises(self): + # GH 6279 - SeriesGroupBy histogram with legend and label raises + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + g.hist(legend=True, label="d") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_hist_method.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_hist_method.py new file mode 100644 index 0000000000000000000000000000000000000000..4d17f87fdc7bc1456a118c84b76c631544572fd4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_hist_method.py @@ -0,0 +1,971 @@ +""" Test cases for .hist method """ +import re + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, + date_range, + to_datetime, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_ax_scales, + _check_axes_shape, + _check_colors, + _check_legend_labels, + _check_patches_all_filled, + _check_plot_works, + _check_text_labels, + _check_ticks_props, + get_x_axis, + get_y_axis, +) + +mpl = pytest.importorskip("matplotlib") + + +@pytest.fixture +def ts(): + return Series( + np.arange(30, dtype=np.float64), + index=date_range("2020-01-01", periods=30, freq="B"), + name="ts", + ) + 
+ +class TestSeriesPlots: + @pytest.mark.parametrize("kwargs", [{}, {"grid": False}, {"figsize": (8, 10)}]) + def test_hist_legacy_kwargs(self, ts, kwargs): + _check_plot_works(ts.hist, **kwargs) + + @pytest.mark.parametrize("kwargs", [{}, {"bins": 5}]) + def test_hist_legacy_kwargs_warning(self, ts, kwargs): + # _check_plot_works adds an ax so catch warning. see GH #13188 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(ts.hist, by=ts.index.month, **kwargs) + + def test_hist_legacy_ax(self, ts): + fig, ax = mpl.pyplot.subplots(1, 1) + _check_plot_works(ts.hist, ax=ax, default_axes=True) + + def test_hist_legacy_ax_and_fig(self, ts): + fig, ax = mpl.pyplot.subplots(1, 1) + _check_plot_works(ts.hist, ax=ax, figure=fig, default_axes=True) + + def test_hist_legacy_fig(self, ts): + fig, _ = mpl.pyplot.subplots(1, 1) + _check_plot_works(ts.hist, figure=fig, default_axes=True) + + def test_hist_legacy_multi_ax(self, ts): + fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2) + _check_plot_works(ts.hist, figure=fig, ax=ax1, default_axes=True) + _check_plot_works(ts.hist, figure=fig, ax=ax2, default_axes=True) + + def test_hist_legacy_by_fig_error(self, ts): + fig, _ = mpl.pyplot.subplots(1, 1) + msg = ( + "Cannot pass 'figure' when using the 'by' argument, since a new 'Figure' " + "instance will be created" + ) + with pytest.raises(ValueError, match=msg): + ts.hist(by=ts.index, figure=fig) + + def test_hist_bins_legacy(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + ax = df.hist(bins=2)[0][0] + assert len(ax.patches) == 2 + + def test_hist_layout(self, hist_df): + df = hist_df + msg = "The 'layout' keyword is not supported when 'by' is None" + with pytest.raises(ValueError, match=msg): + df.height.hist(layout=(1, 1)) + + with pytest.raises(ValueError, match=msg): + df.height.hist(layout=[1, 1]) + + @pytest.mark.slow + @pytest.mark.parametrize( + "by, layout, axes_num, res_layout", + [ + ["gender", (2, 1), 2, (2, 1)], + ["gender", (3, -1), 2, (3, 1)], + ["category", (4, 1), 4, (4, 1)], + ["category", (2, -1), 4, (2, 2)], + ["category", (3, -1), 4, (3, 2)], + ["category", (-1, 4), 4, (1, 4)], + ["classroom", (2, 2), 3, (2, 2)], + ], + ) + def test_hist_layout_with_by(self, hist_df, by, layout, axes_num, res_layout): + df = hist_df + + # _check_plot_works adds an `ax` kwarg to the method call + # so we get a warning about an axis being cleared, even + # though we don't explicing pass one, see GH #13188 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.height.hist, by=getattr(df, by), layout=layout) + _check_axes_shape(axes, axes_num=axes_num, layout=res_layout) + + def test_hist_layout_with_by_shape(self, hist_df): + df = hist_df + + axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7)) + _check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7)) + + def test_hist_no_overlap(self): + from matplotlib.pyplot import ( + gcf, + subplot, + ) + + x = Series(np.random.default_rng(2).standard_normal(2)) + y = Series(np.random.default_rng(2).standard_normal(2)) + subplot(121) + x.hist() + subplot(122) + y.hist() + fig = gcf() + axes = fig.axes + assert len(axes) == 2 + + def test_hist_by_no_extra_plots(self, hist_df): + df = hist_df + df.height.hist(by=df.gender) + assert len(mpl.pyplot.get_fignums()) == 1 + + def test_plot_fails_when_ax_differs_from_figure(self, ts): + from pylab import figure + + fig1 = figure() + fig2 = figure() + ax1 = fig1.add_subplot(111) + msg = 
"passed axis not bound to passed figure" + with pytest.raises(AssertionError, match=msg): + ts.hist(ax=ax1, figure=fig2) + + @pytest.mark.parametrize( + "histtype, expected", + [ + ("bar", True), + ("barstacked", True), + ("step", False), + ("stepfilled", True), + ], + ) + def test_histtype_argument(self, histtype, expected): + # GH23992 Verify functioning of histtype argument + ser = Series(np.random.default_rng(2).integers(1, 10)) + ax = ser.hist(histtype=histtype) + _check_patches_all_filled(ax, filled=expected) + + @pytest.mark.parametrize( + "by, expected_axes_num, expected_layout", [(None, 1, (1, 1)), ("b", 2, (1, 2))] + ) + def test_hist_with_legend(self, by, expected_axes_num, expected_layout): + # GH 6279 - Series histogram can have a legend + index = 15 * ["1"] + 15 * ["2"] + s = Series(np.random.default_rng(2).standard_normal(30), index=index, name="a") + s.index.name = "b" + + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works(s.hist, default_axes=True, legend=True, by=by) + _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) + _check_legend_labels(axes, "a") + + @pytest.mark.parametrize("by", [None, "b"]) + def test_hist_with_legend_raises(self, by): + # GH 6279 - Series histogram with legend and label raises + index = 15 * ["1"] + 15 * ["2"] + s = Series(np.random.default_rng(2).standard_normal(30), index=index, name="a") + s.index.name = "b" + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + s.hist(legend=True, by=by, label="c") + + def test_hist_kwargs(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(bins=5, ax=ax) + assert len(ax.patches) == 5 + _check_text_labels(ax.yaxis.get_label(), "Frequency") + + def test_hist_kwargs_horizontal(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(bins=5, ax=ax) + ax = ts.plot.hist(orientation="horizontal", ax=ax) + _check_text_labels(ax.xaxis.get_label(), "Frequency") + + def test_hist_kwargs_align(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(bins=5, ax=ax) + ax = ts.plot.hist(align="left", stacked=True, ax=ax) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_hist_kde(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(logy=True, ax=ax) + _check_ax_scales(ax, yaxis="log") + xlabels = ax.get_xticklabels() + # ticks are values, thus ticklabels are blank + _check_text_labels(xlabels, [""] * len(xlabels)) + ylabels = ax.get_yticklabels() + _check_text_labels(ylabels, [""] * len(ylabels)) + + def test_hist_kde_plot_works(self, ts): + pytest.importorskip("scipy") + _check_plot_works(ts.plot.kde) + + def test_hist_kde_density_works(self, ts): + pytest.importorskip("scipy") + _check_plot_works(ts.plot.density) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_hist_kde_logy(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.kde(logy=True, ax=ax) + _check_ax_scales(ax, yaxis="log") + xlabels = ax.get_xticklabels() + _check_text_labels(xlabels, [""] * len(xlabels)) + ylabels = ax.get_yticklabels() + _check_text_labels(ylabels, [""] * len(ylabels)) + + def test_hist_kde_color_bins(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(logy=True, bins=10, color="b", ax=ax) + _check_ax_scales(ax, yaxis="log") + assert len(ax.patches) == 10 + _check_colors(ax.patches, facecolors=["b"] * 10) + + def test_hist_kde_color(self, ts): + pytest.importorskip("scipy") + 
_, ax = mpl.pyplot.subplots() + ax = ts.plot.kde(logy=True, color="r", ax=ax) + _check_ax_scales(ax, yaxis="log") + lines = ax.get_lines() + assert len(lines) == 1 + _check_colors(lines, ["r"]) + + +class TestDataFramePlots: + @pytest.mark.slow + def test_hist_df_legacy(self, hist_df): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(hist_df.hist) + + @pytest.mark.slow + def test_hist_df_legacy_layout(self): + # make sure layout is handled + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.hist, grid=False) + _check_axes_shape(axes, axes_num=3, layout=(2, 2)) + assert not axes[1, 1].get_visible() + + _check_plot_works(df[[2]].hist) + + @pytest.mark.slow + def test_hist_df_legacy_layout2(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 1))) + _check_plot_works(df.hist) + + @pytest.mark.slow + def test_hist_df_legacy_layout3(self): + # make sure layout is handled + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + df[5] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.hist, layout=(4, 2)) + _check_axes_shape(axes, axes_num=6, layout=(4, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", [{"sharex": True, "sharey": True}, {"figsize": (8, 10)}, {"bins": 5}] + ) + def test_hist_df_legacy_layout_kwargs(self, kwargs): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + df[5] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + # make sure sharex, sharey is handled + # handle figsize arg + # check bins argument + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(df.hist, **kwargs) + + @pytest.mark.slow + def test_hist_df_legacy_layout_labelsize_rot(self, frame_or_series): + # make sure xlabelsize and xrot are handled + obj = frame_or_series(range(10)) + xf, yf = 20, 18 + xrot, yrot = 30, 40 + axes = obj.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + _check_ticks_props(axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + + @pytest.mark.slow + def test_hist_df_legacy_rectangles(self): + from matplotlib.patches import Rectangle + + ser = Series(range(10)) + ax = ser.hist(cumulative=True, bins=4, density=True) + # height of last bin (index 5) must be 1.0 + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + tm.assert_almost_equal(rects[-1].get_height(), 1.0) + + @pytest.mark.slow + def test_hist_df_legacy_scale(self): + ser = Series(range(10)) + ax = ser.hist(log=True) + # scale of y must be 'log' + _check_ax_scales(ax, yaxis="log") + + @pytest.mark.slow + def test_hist_df_legacy_external_error(self): + ser = Series(range(10)) + # propagate attr exception from matplotlib.Axes.hist + with tm.external_error_raised(AttributeError): + ser.hist(foo="bar") + + def test_hist_non_numerical_or_datetime_raises(self): + # gh-10444, GH32590 + df = DataFrame( + { + "a": np.random.default_rng(2).random(10), + "b": np.random.default_rng(2).integers(0, 10, 10), + "c": to_datetime( + 
np.random.default_rng(2).integers( + 1582800000000000000, 1583500000000000000, 10, dtype=np.int64 + ) + ), + "d": to_datetime( + np.random.default_rng(2).integers( + 1582800000000000000, 1583500000000000000, 10, dtype=np.int64 + ), + utc=True, + ), + } + ) + df_o = df.astype(object) + + msg = "hist method requires numerical or datetime columns, nothing to plot." + with pytest.raises(ValueError, match=msg): + df_o.hist() + + @pytest.mark.parametrize( + "layout_test", + ( + {"layout": None, "expected_size": (2, 2)}, # default is 2x2 + {"layout": (2, 2), "expected_size": (2, 2)}, + {"layout": (4, 1), "expected_size": (4, 1)}, + {"layout": (1, 4), "expected_size": (1, 4)}, + {"layout": (3, 3), "expected_size": (3, 3)}, + {"layout": (-1, 4), "expected_size": (1, 4)}, + {"layout": (4, -1), "expected_size": (4, 1)}, + {"layout": (-1, 2), "expected_size": (2, 2)}, + {"layout": (2, -1), "expected_size": (2, 2)}, + ), + ) + def test_hist_layout(self, layout_test): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + axes = df.hist(layout=layout_test["layout"]) + expected = layout_test["expected_size"] + _check_axes_shape(axes, axes_num=3, layout=expected) + + def test_hist_layout_error(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + # layout too small for all 4 plots + msg = "Layout of 1x1 must be larger than required size 3" + with pytest.raises(ValueError, match=msg): + df.hist(layout=(1, 1)) + + # invalid format for layout + msg = re.escape("Layout must be a tuple of (rows, columns)") + with pytest.raises(ValueError, match=msg): + df.hist(layout=(1,)) + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + df.hist(layout=(-1, -1)) + + # GH 9351 + def test_tight_layout(self): + df = DataFrame(np.random.default_rng(2).standard_normal((100, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=100, + dtype=np.int64, + ) + ) + # Use default_axes=True when plotting method generate subplots itself + _check_plot_works(df.hist, default_axes=True) + mpl.pyplot.tight_layout() + + def test_hist_subplot_xrot(self): + # GH 30288 + df = DataFrame( + { + "length": [1.5, 0.5, 1.2, 0.9, 3], + "animal": ["pig", "rabbit", "pig", "pig", "rabbit"], + } + ) + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + column="length", + by="animal", + bins=5, + xrot=0, + ) + _check_ticks_props(axes, xrot=0) + + @pytest.mark.parametrize( + "column, expected", + [ + (None, ["width", "length", "height"]), + (["length", "width", "height"], ["length", "width", "height"]), + ], + ) + def test_hist_column_order_unchanged(self, column, expected): + # GH29235 + + df = DataFrame( + { + "width": [0.7, 0.2, 0.15, 0.2, 1.1], + "length": [1.5, 0.5, 1.2, 0.9, 3], + "height": [3, 0.5, 3.4, 2, 1], + }, + index=["pig", "rabbit", "duck", "chicken", "horse"], + ) + + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + column=column, + layout=(1, 3), + ) + result = [axes[0, i].get_title() for i in range(3)] + assert result == expected + + 
@pytest.mark.parametrize( + "histtype, expected", + [ + ("bar", True), + ("barstacked", True), + ("step", False), + ("stepfilled", True), + ], + ) + def test_histtype_argument(self, histtype, expected): + # GH23992 Verify functioning of histtype argument + df = DataFrame( + np.random.default_rng(2).integers(1, 10, size=(100, 2)), columns=["a", "b"] + ) + ax = df.hist(histtype=histtype) + _check_patches_all_filled(ax, filled=expected) + + @pytest.mark.parametrize("by", [None, "c"]) + @pytest.mark.parametrize("column", [None, "b"]) + def test_hist_with_legend(self, by, column): + # GH 6279 - DataFrame histogram can have a legend + expected_axes_num = 1 if by is None and column is not None else 2 + expected_layout = (1, expected_axes_num) + expected_labels = column or ["a", "b"] + if by is not None: + expected_labels = [expected_labels] * 2 + + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + legend=True, + by=by, + column=column, + ) + + _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) + if by is None and column is None: + axes = axes[0] + for expected_label, ax in zip(expected_labels, axes): + _check_legend_labels(ax, expected_label) + + @pytest.mark.parametrize("by", [None, "c"]) + @pytest.mark.parametrize("column", [None, "b"]) + def test_hist_with_legend_raises(self, by, column): + # GH 6279 - DataFrame histogram with legend and label raises + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + df.hist(legend=True, by=by, column=column, label="d") + + def test_hist_df_kwargs(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + _, ax = mpl.pyplot.subplots() + ax = df.plot.hist(bins=5, ax=ax) + assert len(ax.patches) == 10 + + def test_hist_df_with_nonnumerics(self): + # GH 9853 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "B", "C", "D"], + ) + df["E"] = ["x", "y"] * 5 + _, ax = mpl.pyplot.subplots() + ax = df.plot.hist(bins=5, ax=ax) + assert len(ax.patches) == 20 + + def test_hist_df_with_nonnumerics_no_bins(self): + # GH 9853 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "B", "C", "D"], + ) + df["E"] = ["x", "y"] * 5 + _, ax = mpl.pyplot.subplots() + ax = df.plot.hist(ax=ax) # bins=10 + assert len(ax.patches) == 40 + + def test_hist_secondary_legend(self): + # GH 9610 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), columns=list("abcd") + ) + + # primary -> secondary + _, ax = mpl.pyplot.subplots() + ax = df["a"].plot.hist(legend=True, ax=ax) + df["b"].plot.hist(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax, labels=["a", "b (right)"]) + assert ax.get_yaxis().get_visible() + assert ax.right_ax.get_yaxis().get_visible() + + def test_hist_secondary_secondary(self): + # GH 9610 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), columns=list("abcd") + ) + # secondary -> secondary + _, ax = mpl.pyplot.subplots() + ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax) + 
df["b"].plot.hist(ax=ax, legend=True, secondary_y=True) + # both legends are draw on left ax + # left axis must be invisible, right axis must be visible + _check_legend_labels(ax.left_ax, labels=["a (right)", "b (right)"]) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_hist_secondary_primary(self): + # GH 9610 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), columns=list("abcd") + ) + # secondary -> primary + _, ax = mpl.pyplot.subplots() + ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax) + # right axes is returned + df["b"].plot.hist(ax=ax, legend=True) + # both legends are draw on left ax + # left and right axis must be visible + _check_legend_labels(ax.left_ax, labels=["a (right)", "b"]) + assert ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_hist_with_nans_and_weights(self): + # GH 48884 + mpl_patches = pytest.importorskip("matplotlib.patches") + df = DataFrame( + [[np.nan, 0.2, 0.3], [0.4, np.nan, np.nan], [0.7, 0.8, 0.9]], + columns=list("abc"), + ) + weights = np.array([0.25, 0.3, 0.45]) + no_nan_df = DataFrame([[0.4, 0.2, 0.3], [0.7, 0.8, 0.9]], columns=list("abc")) + no_nan_weights = np.array([[0.3, 0.25, 0.25], [0.45, 0.45, 0.45]]) + + _, ax0 = mpl.pyplot.subplots() + df.plot.hist(ax=ax0, weights=weights) + rects = [x for x in ax0.get_children() if isinstance(x, mpl_patches.Rectangle)] + heights = [rect.get_height() for rect in rects] + _, ax1 = mpl.pyplot.subplots() + no_nan_df.plot.hist(ax=ax1, weights=no_nan_weights) + no_nan_rects = [ + x for x in ax1.get_children() if isinstance(x, mpl_patches.Rectangle) + ] + no_nan_heights = [rect.get_height() for rect in no_nan_rects] + assert all(h0 == h1 for h0, h1 in zip(heights, no_nan_heights)) + + idxerror_weights = np.array([[0.3, 0.25], [0.45, 0.45]]) + + msg = "weights must have the same shape as data, or be a single column" + with pytest.raises(ValueError, match=msg): + _, ax2 = mpl.pyplot.subplots() + no_nan_df.plot.hist(ax=ax2, weights=idxerror_weights) + + +class TestDataFrameGroupByPlots: + def test_grouped_hist_legacy(self): + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(10) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + + axes = _grouped_hist(df.A, by=df.C) + _check_axes_shape(axes, axes_num=4, layout=(2, 2)) + + def test_grouped_hist_legacy_axes_shape_no_col(self): + rs = np.random.default_rng(10) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + axes = df.hist(by=df.C) + _check_axes_shape(axes, axes_num=4, layout=(2, 2)) + + def test_grouped_hist_legacy_single_key(self): + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + # group by a key with single value + axes = df.hist(by="D", rot=30) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + _check_ticks_props(axes, xrot=30) + + def test_grouped_hist_legacy_grouped_hist_kwargs(self): + from 
matplotlib.patches import Rectangle + + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + # make sure kwargs to hist are handled + xf, yf = 20, 18 + xrot, yrot = 30, 40 + + axes = _grouped_hist( + df.A, + by=df.C, + cumulative=True, + bins=4, + xlabelsize=xf, + xrot=xrot, + ylabelsize=yf, + yrot=yrot, + density=True, + ) + # height of last bin (index 5) must be 1.0 + for ax in axes.ravel(): + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + height = rects[-1].get_height() + tm.assert_almost_equal(height, 1.0) + _check_ticks_props(axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + + def test_grouped_hist_legacy_grouped_hist(self): + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + axes = _grouped_hist(df.A, by=df.C, log=True) + # scale of y must be 'log' + _check_ax_scales(axes, yaxis="log") + + def test_grouped_hist_legacy_external_err(self): + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + # propagate attr exception from matplotlib.Axes.hist + with tm.external_error_raised(AttributeError): + _grouped_hist(df.A, by=df.C, foo="bar") + + def test_grouped_hist_legacy_figsize_err(self): + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + msg = "Specify figure size by tuple instead" + with pytest.raises(ValueError, match=msg): + df.hist(by="C", figsize="default") + + def test_grouped_hist_legacy2(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender_int = np.random.default_rng(2).choice([0, 1], size=n) + df_int = DataFrame({"height": height, "weight": weight, "gender": gender_int}) + gb = df_int.groupby("gender") + axes = gb.hist() + assert len(axes) == 2 + assert len(mpl.pyplot.get_fignums()) == 2 + + @pytest.mark.slow + @pytest.mark.parametrize( + "msg, plot_col, by_col, layout", + [ + [ + "Layout of 1x1 must be larger than required size 2", + "weight", + "gender", + (1, 1), + ], + [ + "Layout of 1x3 must be larger than required size 4", + "height", + "category", + (1, 3), + ], + [ + "At least one dimension of layout must be positive", + "height", + "category", + (-1, -1), + ], + ], + ) + def test_grouped_hist_layout_error(self, hist_df, msg, plot_col, by_col, layout): + df = hist_df + with pytest.raises(ValueError, match=msg): + df.hist(column=plot_col, by=getattr(df, by_col), layout=layout) + + @pytest.mark.slow + def test_grouped_hist_layout_warning(self, hist_df): + df = hist_df + with 
tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + df.hist, column="height", by=df.gender, layout=(2, 1) + ) + _check_axes_shape(axes, axes_num=2, layout=(2, 1)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "layout, check_layout, figsize", + [[(4, 1), (4, 1), None], [(-1, 1), (4, 1), None], [(4, 2), (4, 2), (12, 8)]], + ) + def test_grouped_hist_layout_figsize(self, hist_df, layout, check_layout, figsize): + df = hist_df + axes = df.hist(column="height", by=df.category, layout=layout, figsize=figsize) + _check_axes_shape(axes, axes_num=4, layout=check_layout, figsize=figsize) + + @pytest.mark.slow + @pytest.mark.parametrize("kwargs", [{}, {"column": "height", "layout": (2, 2)}]) + def test_grouped_hist_layout_by_warning(self, hist_df, kwargs): + df = hist_df + # GH 6769 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.hist, by="classroom", **kwargs) + _check_axes_shape(axes, axes_num=3, layout=(2, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs, axes_num, layout", + [ + [{"by": "gender", "layout": (3, 5)}, 2, (3, 5)], + [{"column": ["height", "weight", "category"]}, 3, (2, 2)], + ], + ) + def test_grouped_hist_layout_axes(self, hist_df, kwargs, axes_num, layout): + df = hist_df + axes = df.hist(**kwargs) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + def test_grouped_hist_multiple_axes(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + + fig, axes = mpl.pyplot.subplots(2, 3) + returned = df.hist(column=["height", "weight", "category"], ax=axes[0]) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[0]) + assert returned[0].figure is fig + + def test_grouped_hist_multiple_axes_no_cols(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + + fig, axes = mpl.pyplot.subplots(2, 3) + returned = df.hist(by="classroom", ax=axes[1]) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[1]) + assert returned[0].figure is fig + + def test_grouped_hist_multiple_axes_error(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + fig, axes = mpl.pyplot.subplots(2, 3) + # pass different number of axes from required + msg = "The number of passed axes must be 1, the same as the output plot" + with pytest.raises(ValueError, match=msg): + axes = df.hist(column="height", ax=axes) + + def test_axis_share_x(self, hist_df): + df = hist_df + # GH4089 + ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True) + + # share x + assert get_x_axis(ax1).joined(ax1, ax2) + assert get_x_axis(ax2).joined(ax1, ax2) + + # don't share y + assert not get_y_axis(ax1).joined(ax1, ax2) + assert not get_y_axis(ax2).joined(ax1, ax2) + + def test_axis_share_y(self, hist_df): + df = hist_df + ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True) + + # share y + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + + # don't share x + assert not get_x_axis(ax1).joined(ax1, ax2) + assert not get_x_axis(ax2).joined(ax1, ax2) + + def test_axis_share_xy(self, hist_df): + df = hist_df + ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True) + + # share both x and y + assert get_x_axis(ax1).joined(ax1, ax2) + assert get_x_axis(ax2).joined(ax1, ax2) + + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + + @pytest.mark.parametrize( + "histtype, expected", + [ + ("bar", True), + ("barstacked", True), + ("step", 
False), + ("stepfilled", True), + ], + ) + def test_histtype_argument(self, histtype, expected): + # GH23992 Verify functioning of histtype argument + df = DataFrame( + np.random.default_rng(2).integers(1, 10, size=(10, 2)), columns=["a", "b"] + ) + ax = df.hist(by="a", histtype=histtype) + _check_patches_all_filled(ax, filled=expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_misc.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb657c2a800fefe2d509ddfb398399af4ce8649 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_misc.py @@ -0,0 +1,720 @@ +""" Test cases for misc plot functions """ +import os + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + Index, + Series, + Timestamp, + date_range, + interval_range, + period_range, + plotting, + read_csv, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_colors, + _check_legend_labels, + _check_plot_works, + _check_text_labels, + _check_ticks_props, +) + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") +cm = pytest.importorskip("matplotlib.cm") + + +@pytest.fixture +def iris(datapath) -> DataFrame: + """ + The iris dataset as a DataFrame. + """ + return read_csv(datapath("io", "data", "csv", "iris.csv")) + + +@td.skip_if_installed("matplotlib") +def test_import_error_message(): + # GH-19810 + df = DataFrame({"A": [1, 2]}) + + with pytest.raises(ImportError, match="matplotlib is required for plotting"): + df.plot() + + +def test_get_accessor_args(): + func = plotting._core.PlotAccessor._get_call_args + + msg = "Called plot accessor for type list, expected Series or DataFrame" + with pytest.raises(TypeError, match=msg): + func(backend_name="", data=[], args=[], kwargs={}) + + msg = "should not be called with positional arguments" + with pytest.raises(TypeError, match=msg): + func(backend_name="", data=Series(dtype=object), args=["line", None], kwargs={}) + + x, y, kind, kwargs = func( + backend_name="", + data=DataFrame(), + args=["x"], + kwargs={"y": "y", "kind": "bar", "grid": False}, + ) + assert x == "x" + assert y == "y" + assert kind == "bar" + assert kwargs == {"grid": False} + + x, y, kind, kwargs = func( + backend_name="pandas.plotting._matplotlib", + data=Series(dtype=object), + args=[], + kwargs={}, + ) + assert x is None + assert y is None + assert kind == "line" + assert len(kwargs) == 24 + + +@pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds) +@pytest.mark.parametrize( + "data", [DataFrame(np.arange(15).reshape(5, 3)), Series(range(5))] +) +@pytest.mark.parametrize( + "index", + [ + Index(range(5)), + date_range("2020-01-01", periods=5), + period_range("2020-01-01", periods=5), + ], +) +def test_savefig(kind, data, index): + fig, ax = plt.subplots() + data.index = index + kwargs = {} + if kind in ["hexbin", "scatter", "pie"]: + if isinstance(data, Series): + pytest.skip(f"{kind} not supported with Series") + kwargs = {"x": 0, "y": 1} + data.plot(kind=kind, ax=ax, **kwargs) + fig.savefig(os.devnull) + + +class TestSeriesPlots: + def test_autocorrelation_plot(self): + from pandas.plotting import autocorrelation_plot + + ser = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + # Ensure no UserWarning when making plot + with tm.assert_produces_warning(None): + 
_check_plot_works(autocorrelation_plot, series=ser) + _check_plot_works(autocorrelation_plot, series=ser.values) + + ax = autocorrelation_plot(ser, label="Test") + _check_legend_labels(ax, labels=["Test"]) + + @pytest.mark.parametrize("kwargs", [{}, {"lag": 5}]) + def test_lag_plot(self, kwargs): + from pandas.plotting import lag_plot + + ser = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + _check_plot_works(lag_plot, series=ser, **kwargs) + + def test_bootstrap_plot(self): + from pandas.plotting import bootstrap_plot + + ser = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + _check_plot_works(bootstrap_plot, series=ser, size=10) + + +class TestDataFramePlots: + @pytest.mark.parametrize("pass_axis", [False, True]) + def test_scatter_matrix_axis(self, pass_axis): + pytest.importorskip("scipy") + scatter_matrix = plotting.scatter_matrix + + ax = None + if pass_axis: + _, ax = mpl.pyplot.subplots(3, 3) + + df = DataFrame(np.random.default_rng(2).standard_normal((100, 3))) + + # we are plotting multiples on a sub-plot + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + scatter_matrix, + frame=df, + range_padding=0.1, + ax=ax, + ) + axes0_labels = axes[0][0].yaxis.get_majorticklabels() + # GH 5662 + expected = ["-2", "0", "2"] + _check_text_labels(axes0_labels, expected) + _check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + @pytest.mark.parametrize("pass_axis", [False, True]) + def test_scatter_matrix_axis_smaller(self, pass_axis): + pytest.importorskip("scipy") + scatter_matrix = plotting.scatter_matrix + + ax = None + if pass_axis: + _, ax = mpl.pyplot.subplots(3, 3) + + df = DataFrame(np.random.default_rng(11).standard_normal((100, 3))) + df[0] = (df[0] - 2) / 3 + + # we are plotting multiples on a sub-plot + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + scatter_matrix, + frame=df, + range_padding=0.1, + ax=ax, + ) + axes0_labels = axes[0][0].yaxis.get_majorticklabels() + expected = ["-1.0", "-0.5", "0.0"] + _check_text_labels(axes0_labels, expected) + _check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + @pytest.mark.slow + def test_andrews_curves_no_warning(self, iris): + from pandas.plotting import andrews_curves + + df = iris + # Ensure no UserWarning when making plot + with tm.assert_produces_warning(None): + _check_plot_works(andrews_curves, frame=df, class_column="Name") + + @pytest.mark.slow + @pytest.mark.parametrize( + "linecolors", + [ + ("#556270", "#4ECDC4", "#C7F464"), + ["dodgerblue", "aquamarine", "seagreen"], + ], + ) + @pytest.mark.parametrize( + "df", + [ + "iris", + DataFrame( + { + "A": np.random.default_rng(2).standard_normal(10), + "B": np.random.default_rng(2).standard_normal(10), + "C": np.random.default_rng(2).standard_normal(10), + "Name": ["A"] * 10, + } + ), + ], + ) + def test_andrews_curves_linecolors(self, request, df, linecolors): + from pandas.plotting import andrews_curves + + if isinstance(df, str): + df = request.getfixturevalue(df) + ax = _check_plot_works( + andrews_curves, frame=df, class_column="Name", color=linecolors + ) + _check_colors( + ax.get_lines()[:10], linecolors=linecolors, mapping=df["Name"][:10] + ) + + @pytest.mark.slow + @pytest.mark.parametrize( + "df", + [ + "iris", + DataFrame( + { + "A": np.random.default_rng(2).standard_normal(10), + "B": 
np.random.default_rng(2).standard_normal(10), + "C": np.random.default_rng(2).standard_normal(10), + "Name": ["A"] * 10, + } + ), + ], + ) + def test_andrews_curves_cmap(self, request, df): + from pandas.plotting import andrews_curves + + if isinstance(df, str): + df = request.getfixturevalue(df) + cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())] + ax = _check_plot_works( + andrews_curves, frame=df, class_column="Name", color=cmaps + ) + _check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]) + + @pytest.mark.slow + def test_andrews_curves_handle(self): + from pandas.plotting import andrews_curves + + colors = ["b", "g", "r"] + df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors}) + ax = andrews_curves(df, "Name", color=colors) + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, linecolors=colors) + + @pytest.mark.slow + @pytest.mark.parametrize( + "color", + [("#556270", "#4ECDC4", "#C7F464"), ["dodgerblue", "aquamarine", "seagreen"]], + ) + def test_parallel_coordinates_colors(self, iris, color): + from pandas.plotting import parallel_coordinates + + df = iris + + ax = _check_plot_works( + parallel_coordinates, frame=df, class_column="Name", color=color + ) + _check_colors(ax.get_lines()[:10], linecolors=color, mapping=df["Name"][:10]) + + @pytest.mark.slow + def test_parallel_coordinates_cmap(self, iris): + from matplotlib import cm + + from pandas.plotting import parallel_coordinates + + df = iris + + ax = _check_plot_works( + parallel_coordinates, frame=df, class_column="Name", colormap=cm.jet + ) + cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())] + _check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]) + + @pytest.mark.slow + def test_parallel_coordinates_line_diff(self, iris): + from pandas.plotting import parallel_coordinates + + df = iris + + ax = _check_plot_works(parallel_coordinates, frame=df, class_column="Name") + nlines = len(ax.get_lines()) + nxticks = len(ax.xaxis.get_ticklabels()) + + ax = _check_plot_works( + parallel_coordinates, frame=df, class_column="Name", axvlines=False + ) + assert len(ax.get_lines()) == (nlines - nxticks) + + @pytest.mark.slow + def test_parallel_coordinates_handles(self, iris): + from pandas.plotting import parallel_coordinates + + df = iris + colors = ["b", "g", "r"] + df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors}) + ax = parallel_coordinates(df, "Name", color=colors) + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, linecolors=colors) + + # not sure if this is indicative of a problem + @pytest.mark.filterwarnings("ignore:Attempting to set:UserWarning") + def test_parallel_coordinates_with_sorted_labels(self): + """For #15908""" + from pandas.plotting import parallel_coordinates + + df = DataFrame( + { + "feat": list(range(30)), + "class": [2 for _ in range(10)] + + [3 for _ in range(10)] + + [1 for _ in range(10)], + } + ) + ax = parallel_coordinates(df, "class", sort_labels=True) + polylines, labels = ax.get_legend_handles_labels() + color_label_tuples = zip( + [polyline.get_color() for polyline in polylines], labels + ) + ordered_color_label_tuples = sorted(color_label_tuples, key=lambda x: x[1]) + prev_next_tupels = zip( + list(ordered_color_label_tuples[0:-1]), list(ordered_color_label_tuples[1:]) + ) + for prev, nxt in prev_next_tupels: + # labels and colors are ordered strictly increasing + assert prev[1] < nxt[1] and prev[0] < nxt[0] + + def 
test_radviz_no_warning(self, iris): + from pandas.plotting import radviz + + df = iris + # Ensure no UserWarning when making plot + with tm.assert_produces_warning(None): + _check_plot_works(radviz, frame=df, class_column="Name") + + @pytest.mark.parametrize( + "color", + [("#556270", "#4ECDC4", "#C7F464"), ["dodgerblue", "aquamarine", "seagreen"]], + ) + def test_radviz_color(self, iris, color): + from pandas.plotting import radviz + + df = iris + ax = _check_plot_works(radviz, frame=df, class_column="Name", color=color) + # skip Circle drawn as ticks + patches = [p for p in ax.patches[:20] if p.get_label() != ""] + _check_colors(patches[:10], facecolors=color, mapping=df["Name"][:10]) + + def test_radviz_color_cmap(self, iris): + from matplotlib import cm + + from pandas.plotting import radviz + + df = iris + ax = _check_plot_works(radviz, frame=df, class_column="Name", colormap=cm.jet) + cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())] + patches = [p for p in ax.patches[:20] if p.get_label() != ""] + _check_colors(patches, facecolors=cmaps, mapping=df["Name"][:10]) + + def test_radviz_colors_handles(self): + from pandas.plotting import radviz + + colors = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0]] + df = DataFrame( + {"A": [1, 2, 3], "B": [2, 1, 3], "C": [3, 2, 1], "Name": ["b", "g", "r"]} + ) + ax = radviz(df, "Name", color=colors) + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, facecolors=colors) + + def test_subplot_titles(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + + # Case len(title) == len(df) + plot = df.plot(subplots=True, title=title) + assert [p.get_title() for p in plot] == title + + def test_subplot_titles_too_much(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + # Case len(title) > len(df) + msg = ( + "The length of `title` must equal the number of columns if " + "using `title` of type `list` and `subplots=True`" + ) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, title=title + ["kittens > puppies"]) + + def test_subplot_titles_too_little(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + msg = ( + "The length of `title` must equal the number of columns if " + "using `title` of type `list` and `subplots=True`" + ) + # Case len(title) < len(df) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, title=title[:2]) + + def test_subplot_titles_subplots_false(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + # Case subplots=False and title is of type list + msg = ( + "Using `title` of type `list` is not supported unless " + "`subplots=True` is passed" + ) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=False, title=title) + + def test_subplot_titles_numeric_square_layout(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + # Case df with 3 numeric columns but layout of (2,2) + plot = df.drop("SepalWidth", axis=1).plot( + subplots=True, layout=(2, 2), title=title[:-1] + ) + title_list = [ax.get_title() for sublist in plot for ax in sublist] + assert title_list == title[:3] + [""] + + def test_get_standard_colors_random_seed(self): + # GH17525 + df = 
DataFrame(np.zeros((10, 10))) + + # Make sure that the random seed isn't reset by get_standard_colors + plotting.parallel_coordinates(df, 0) + rand1 = np.random.default_rng(None).random() + plotting.parallel_coordinates(df, 0) + rand2 = np.random.default_rng(None).random() + assert rand1 != rand2 + + def test_get_standard_colors_consistency(self): + # GH17525 + # Make sure it produces the same colors every time it's called + from pandas.plotting._matplotlib.style import get_standard_colors + + color1 = get_standard_colors(1, color_type="random") + color2 = get_standard_colors(1, color_type="random") + assert color1 == color2 + + def test_get_standard_colors_default_num_colors(self): + from pandas.plotting._matplotlib.style import get_standard_colors + + # Make sure the default color_types returns the specified amount + color1 = get_standard_colors(1, color_type="default") + color2 = get_standard_colors(9, color_type="default") + color3 = get_standard_colors(20, color_type="default") + assert len(color1) == 1 + assert len(color2) == 9 + assert len(color3) == 20 + + def test_plot_single_color(self): + # Example from #20585. All 3 bars should have the same color + df = DataFrame( + { + "account-start": ["2017-02-03", "2017-03-03", "2017-01-01"], + "client": ["Alice Anders", "Bob Baker", "Charlie Chaplin"], + "balance": [-1432.32, 10.43, 30000.00], + "db-id": [1234, 2424, 251], + "proxy-id": [525, 1525, 2542], + "rank": [52, 525, 32], + } + ) + ax = df.client.value_counts().plot.bar() + colors = [rect.get_facecolor() for rect in ax.get_children()[0:3]] + assert all(color == colors[0] for color in colors) + + def test_get_standard_colors_no_appending(self): + # GH20726 + + # Make sure not to add more colors so that matplotlib can cycle + # correctly. + from matplotlib import cm + + from pandas.plotting._matplotlib.style import get_standard_colors + + color_before = cm.gnuplot(range(5)) + color_after = get_standard_colors(1, color=color_before) + assert len(color_after) == len(color_before) + + df = DataFrame( + np.random.default_rng(2).standard_normal((48, 4)), columns=list("ABCD") + ) + + color_list = cm.gnuplot(np.linspace(0, 1, 16)) + p = df.A.plot.bar(figsize=(16, 7), color=color_list) + assert p.patches[1].get_facecolor() == p.patches[17].get_facecolor() + + @pytest.mark.parametrize("kind", ["bar", "line"]) + def test_dictionary_color(self, kind): + # issue-8193 + # Test plot color dictionary format + data_files = ["a", "b"] + + expected = [(0.5, 0.24, 0.6), (0.3, 0.7, 0.7)] + + df1 = DataFrame(np.random.default_rng(2).random((2, 2)), columns=data_files) + dic_color = {"b": (0.3, 0.7, 0.7), "a": (0.5, 0.24, 0.6)} + + ax = df1.plot(kind=kind, color=dic_color) + if kind == "bar": + colors = [rect.get_facecolor()[0:-1] for rect in ax.get_children()[0:3:2]] + else: + colors = [rect.get_color() for rect in ax.get_lines()[0:2]] + assert all(color == expected[index] for index, color in enumerate(colors)) + + def test_bar_plot(self): + # GH38947 + # Test bar plot with string and int index + from matplotlib.text import Text + + expected = [Text(0, 0, "0"), Text(1, 0, "Total")] + + df = DataFrame( + { + "a": [1, 2], + }, + index=Index([0, "Total"]), + ) + plot_bar = df.plot.bar() + assert all( + (a.get_text() == b.get_text()) + for a, b in zip(plot_bar.get_xticklabels(), expected) + ) + + def test_barh_plot_labels_mixed_integer_string(self): + # GH39126 + # Test barh plot with string and integer at the same column + from matplotlib.text import Text + + df = DataFrame([{"word": 1, "value": 0}, 
{"word": "knowledge", "value": 2}]) + plot_barh = df.plot.barh(x="word", legend=None) + expected_yticklabels = [Text(0, 0, "1"), Text(0, 1, "knowledge")] + assert all( + actual.get_text() == expected.get_text() + for actual, expected in zip( + plot_barh.get_yticklabels(), expected_yticklabels + ) + ) + + def test_has_externally_shared_axis_x_axis(self): + # GH33819 + # Test _has_externally_shared_axis() works for x-axis + func = plotting._matplotlib.tools._has_externally_shared_axis + + fig = mpl.pyplot.figure() + plots = fig.subplots(2, 4) + + # Create *externally* shared axes for first and third columns + plots[0][0] = fig.add_subplot(231, sharex=plots[1][0]) + plots[0][2] = fig.add_subplot(233, sharex=plots[1][2]) + + # Create *internally* shared axes for second and third columns + plots[0][1].twinx() + plots[0][2].twinx() + + # First column is only externally shared + # Second column is only internally shared + # Third column is both + # Fourth column is neither + assert func(plots[0][0], "x") + assert not func(plots[0][1], "x") + assert func(plots[0][2], "x") + assert not func(plots[0][3], "x") + + def test_has_externally_shared_axis_y_axis(self): + # GH33819 + # Test _has_externally_shared_axis() works for y-axis + func = plotting._matplotlib.tools._has_externally_shared_axis + + fig = mpl.pyplot.figure() + plots = fig.subplots(4, 2) + + # Create *externally* shared axes for first and third rows + plots[0][0] = fig.add_subplot(321, sharey=plots[0][1]) + plots[2][0] = fig.add_subplot(325, sharey=plots[2][1]) + + # Create *internally* shared axes for second and third rows + plots[1][0].twiny() + plots[2][0].twiny() + + # First row is only externally shared + # Second row is only internally shared + # Third row is both + # Fourth row is neither + assert func(plots[0][0], "y") + assert not func(plots[1][0], "y") + assert func(plots[2][0], "y") + assert not func(plots[3][0], "y") + + def test_has_externally_shared_axis_invalid_compare_axis(self): + # GH33819 + # Test _has_externally_shared_axis() raises an exception when + # passed an invalid value as compare_axis parameter + func = plotting._matplotlib.tools._has_externally_shared_axis + + fig = mpl.pyplot.figure() + plots = fig.subplots(4, 2) + + # Create arbitrary axes + plots[0][0] = fig.add_subplot(321, sharey=plots[0][1]) + + # Check that an invalid compare_axis value triggers the expected exception + msg = "needs 'x' or 'y' as a second parameter" + with pytest.raises(ValueError, match=msg): + func(plots[0][0], "z") + + def test_externally_shared_axes(self): + # Example from GH33819 + # Create data + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(1000), + "b": np.random.default_rng(2).standard_normal(1000), + } + ) + + # Create figure + fig = mpl.pyplot.figure() + plots = fig.subplots(2, 3) + + # Create *externally* shared axes + plots[0][0] = fig.add_subplot(231, sharex=plots[1][0]) + # note: no plots[0][1] that's the twin only case + plots[0][2] = fig.add_subplot(233, sharex=plots[1][2]) + + # Create *internally* shared axes + # note: no plots[0][0] that's the external only case + twin_ax1 = plots[0][1].twinx() + twin_ax2 = plots[0][2].twinx() + + # Plot data to primary axes + df["a"].plot(ax=plots[0][0], title="External share only").set_xlabel( + "this label should never be visible" + ) + df["a"].plot(ax=plots[1][0]) + + df["a"].plot(ax=plots[0][1], title="Internal share (twin) only").set_xlabel( + "this label should always be visible" + ) + df["a"].plot(ax=plots[1][1]) + + df["a"].plot(ax=plots[0][2], 
title="Both").set_xlabel( + "this label should never be visible" + ) + df["a"].plot(ax=plots[1][2]) + + # Plot data to twinned axes + df["b"].plot(ax=twin_ax1, color="green") + df["b"].plot(ax=twin_ax2, color="yellow") + + assert not plots[0][0].xaxis.get_label().get_visible() + assert plots[0][1].xaxis.get_label().get_visible() + assert not plots[0][2].xaxis.get_label().get_visible() + + def test_plot_bar_axis_units_timestamp_conversion(self): + # GH 38736 + # Ensure string x-axis from the second plot will not be converted to datetime + # due to axis data from first plot + df = DataFrame( + [1.0], + index=[Timestamp("2022-02-22 22:22:22")], + ) + _check_plot_works(df.plot) + s = Series({"A": 1.0}) + _check_plot_works(s.plot.bar) + + def test_bar_plt_xaxis_intervalrange(self): + # GH 38969 + # Ensure IntervalIndex x-axis produces a bar plot as expected + from matplotlib.text import Text + + expected = [Text(0, 0, "([0, 1],)"), Text(1, 0, "([1, 2],)")] + s = Series( + [1, 2], + index=[interval_range(0, 2, closed="both")], + ) + _check_plot_works(s.plot.bar) + assert all( + (a.get_text() == b.get_text()) + for a, b in zip(s.plot.bar().get_xticklabels(), expected) + ) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_series.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_series.py new file mode 100644 index 0000000000000000000000000000000000000000..2b2f2f3b84307b9ed69e440d2ac0112abf153e67 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_series.py @@ -0,0 +1,985 @@ +""" Test cases for Series.plot """ +from datetime import datetime +from itertools import chain + +import numpy as np +import pytest + +from pandas.compat import is_platform_linux +from pandas.compat.numpy import np_version_gte1p24 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, + period_range, + plotting, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_ax_scales, + _check_axes_shape, + _check_colors, + _check_grid_settings, + _check_has_errorbars, + _check_legend_labels, + _check_plot_works, + _check_text_labels, + _check_ticks_props, + _unpack_cycler, + get_y_axis, +) + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +@pytest.fixture +def ts(): + return Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + + +@pytest.fixture +def series(): + return Series( + range(20), dtype=np.float64, name="series", index=[f"i_{i}" for i in range(20)] + ) + + +class TestSeriesPlots: + @pytest.mark.slow + @pytest.mark.parametrize("kwargs", [{"label": "foo"}, {"use_index": False}]) + def test_plot(self, ts, kwargs): + _check_plot_works(ts.plot, **kwargs) + + @pytest.mark.slow + def test_plot_tick_props(self, ts): + axes = _check_plot_works(ts.plot, rot=0) + _check_ticks_props(axes, xrot=0) + + @pytest.mark.slow + @pytest.mark.parametrize( + "scale, exp_scale", + [ + [{"logy": True}, {"yaxis": "log"}], + [{"logx": True}, {"xaxis": "log"}], + [{"loglog": True}, {"xaxis": "log", "yaxis": "log"}], + ], + ) + def test_plot_scales(self, ts, scale, exp_scale): + ax = _check_plot_works(ts.plot, style=".", **scale) + _check_ax_scales(ax, **exp_scale) + + @pytest.mark.slow + def test_plot_ts_bar(self, ts): + _check_plot_works(ts[:10].plot.bar) + + @pytest.mark.slow + def test_plot_ts_area_stacked(self, ts): + _check_plot_works(ts.plot.area, stacked=False) + + def 
test_plot_iseries(self): + ser = Series(range(5), period_range("2020-01-01", periods=5)) + _check_plot_works(ser.plot) + + @pytest.mark.parametrize( + "kind", + [ + "line", + "bar", + "barh", + pytest.param("kde", marks=td.skip_if_no("scipy")), + "hist", + "box", + ], + ) + def test_plot_series_kinds(self, series, kind): + _check_plot_works(series[:5].plot, kind=kind) + + def test_plot_series_barh(self, series): + _check_plot_works(series[:10].plot.barh) + + def test_plot_series_bar_ax(self): + ax = _check_plot_works( + Series(np.random.default_rng(2).standard_normal(10)).plot.bar, color="black" + ) + _check_colors([ax.patches[0]], facecolors=["black"]) + + @pytest.mark.parametrize("kwargs", [{}, {"layout": (-1, 1)}, {"layout": (1, -1)}]) + def test_plot_6951(self, ts, kwargs): + # GH 6951 + ax = _check_plot_works(ts.plot, subplots=True, **kwargs) + _check_axes_shape(ax, axes_num=1, layout=(1, 1)) + + def test_plot_figsize_and_title(self, series): + # figsize and title + _, ax = mpl.pyplot.subplots() + ax = series.plot(title="Test", figsize=(16, 8), ax=ax) + _check_text_labels(ax.title, "Test") + _check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8)) + + def test_dont_modify_rcParams(self): + # GH 8242 + key = "axes.prop_cycle" + colors = mpl.pyplot.rcParams[key] + _, ax = mpl.pyplot.subplots() + Series([1, 2, 3]).plot(ax=ax) + assert colors == mpl.pyplot.rcParams[key] + + @pytest.mark.parametrize("kwargs", [{}, {"secondary_y": True}]) + def test_ts_line_lim(self, ts, kwargs): + _, ax = mpl.pyplot.subplots() + ax = ts.plot(ax=ax, **kwargs) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data(orig=False)[0][0] + assert xmax >= lines[0].get_data(orig=False)[0][-1] + + def test_ts_area_lim(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.area(stacked=False, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=0) + + def test_ts_area_lim_xcompat(self, ts): + # GH 7471 + _, ax = mpl.pyplot.subplots() + ax = ts.plot.area(stacked=False, x_compat=True, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=30) + + def test_ts_tz_area_lim_xcompat(self, ts): + tz_ts = ts.copy() + tz_ts.index = tz_ts.tz_localize("GMT").tz_convert("CET") + _, ax = mpl.pyplot.subplots() + ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=0) + + def test_ts_tz_area_lim_xcompat_secondary_y(self, ts): + tz_ts = ts.copy() + tz_ts.index = tz_ts.tz_localize("GMT").tz_convert("CET") + _, ax = mpl.pyplot.subplots() + ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=0) + + def test_area_sharey_dont_overwrite(self, ts): + # GH37942 + fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2, sharey=True) + + abs(ts).plot(ax=ax1, kind="area") + abs(ts).plot(ax=ax2, kind="area") + + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + plt.close(fig) + + def test_label(self): + s = Series([1, 2]) + _, ax = mpl.pyplot.subplots() + ax = s.plot(label="LABEL", legend=True, ax=ax) + _check_legend_labels(ax, 
labels=["LABEL"]) + mpl.pyplot.close("all") + + def test_label_none(self): + s = Series([1, 2]) + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=True, ax=ax) + _check_legend_labels(ax, labels=[""]) + mpl.pyplot.close("all") + + def test_label_ser_name(self): + s = Series([1, 2], name="NAME") + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=True, ax=ax) + _check_legend_labels(ax, labels=["NAME"]) + mpl.pyplot.close("all") + + def test_label_ser_name_override(self): + s = Series([1, 2], name="NAME") + # override the default + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=True, label="LABEL", ax=ax) + _check_legend_labels(ax, labels=["LABEL"]) + mpl.pyplot.close("all") + + def test_label_ser_name_override_dont_draw(self): + s = Series([1, 2], name="NAME") + # Add lebel info, but don't draw + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=False, label="LABEL", ax=ax) + assert ax.get_legend() is None # Hasn't been drawn + ax.legend() # draw it + _check_legend_labels(ax, labels=["LABEL"]) + mpl.pyplot.close("all") + + def test_boolean(self): + # GH 23719 + s = Series([False, False, True]) + _check_plot_works(s.plot, include_bool=True) + + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + _check_plot_works(s.plot) + + @pytest.mark.parametrize("index", [None, date_range("2020-01-01", periods=4)]) + def test_line_area_nan_series(self, index): + values = [1, 2, np.nan, 3] + d = Series(values, index=index) + ax = _check_plot_works(d.plot) + masked = ax.lines[0].get_ydata() + # remove nan for comparison purpose + exp = np.array([1, 2, 3], dtype=np.float64) + tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp) + tm.assert_numpy_array_equal(masked.mask, np.array([False, False, True, False])) + + expected = np.array([1, 2, 0, 3], dtype=np.float64) + ax = _check_plot_works(d.plot, stacked=True) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + ax = _check_plot_works(d.plot.area) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + ax = _check_plot_works(d.plot.area, stacked=False) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + + def test_line_use_index_false(self): + s = Series([1, 2, 3], index=["a", "b", "c"]) + s.index.name = "The Index" + _, ax = mpl.pyplot.subplots() + ax = s.plot(use_index=False, ax=ax) + label = ax.get_xlabel() + assert label == "" + + def test_line_use_index_false_diff_var(self): + s = Series([1, 2, 3], index=["a", "b", "c"]) + s.index.name = "The Index" + _, ax = mpl.pyplot.subplots() + ax2 = s.plot.bar(use_index=False, ax=ax) + label2 = ax2.get_xlabel() + assert label2 == "" + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + @pytest.mark.parametrize("axis, meth", [("yaxis", "bar"), ("xaxis", "barh")]) + def test_bar_log(self, axis, meth): + expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]) + + _, ax = mpl.pyplot.subplots() + ax = getattr(Series([200, 500]).plot, meth)(log=True, ax=ax) + tm.assert_numpy_array_equal(getattr(ax, axis).get_ticklocs(), expected) + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + @pytest.mark.parametrize( + "axis, kind, res_meth", + [["yaxis", "bar", "get_ylim"], ["xaxis", "barh", "get_xlim"]], + ) + def test_bar_log_kind_bar(self, axis, kind, res_meth): + # GH 9905 + expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1]) + + _, ax = mpl.pyplot.subplots() + ax = Series([0.1, 0.01, 
0.001]).plot(log=True, kind=kind, ax=ax) + ymin = 0.0007943282347242822 + ymax = 0.12589254117941673 + res = getattr(ax, res_meth)() + tm.assert_almost_equal(res[0], ymin) + tm.assert_almost_equal(res[1], ymax) + tm.assert_numpy_array_equal(getattr(ax, axis).get_ticklocs(), expected) + + def test_bar_ignore_index(self): + df = Series([1, 2, 3, 4], index=["a", "b", "c", "d"]) + _, ax = mpl.pyplot.subplots() + ax = df.plot.bar(use_index=False, ax=ax) + _check_text_labels(ax.get_xticklabels(), ["0", "1", "2", "3"]) + + def test_bar_user_colors(self): + s = Series([1, 2, 3, 4]) + ax = s.plot.bar(color=["red", "blue", "blue", "red"]) + result = [p.get_facecolor() for p in ax.patches] + expected = [ + (1.0, 0.0, 0.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (1.0, 0.0, 0.0, 1.0), + ] + assert result == expected + + def test_rotation_default(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # Default rot 0 + _, ax = mpl.pyplot.subplots() + axes = df.plot(ax=ax) + _check_ticks_props(axes, xrot=0) + + def test_rotation_30(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + _, ax = mpl.pyplot.subplots() + axes = df.plot(rot=30, ax=ax) + _check_ticks_props(axes, xrot=30) + + def test_irregular_datetime(self): + from pandas.plotting._matplotlib.converter import DatetimeConverter + + rng = date_range("1/1/2000", "3/1/2000") + rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]] + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ax = ser.plot(ax=ax) + xp = DatetimeConverter.convert(datetime(1999, 1, 1), "", ax) + ax.set_xlim("1/1/1999", "1/1/2001") + assert xp == ax.get_xlim()[0] + _check_ticks_props(ax, xrot=30) + + def test_unsorted_index_xlim(self): + ser = Series( + [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0], + index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0], + ) + _, ax = mpl.pyplot.subplots() + ax = ser.plot(ax=ax) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= np.nanmin(lines[0].get_data(orig=False)[0]) + assert xmax >= np.nanmax(lines[0].get_data(orig=False)[0]) + + def test_pie_series(self): + # if sum of values is less than 1.0, pie handle them as rate and draw + # semicircle. 
+ series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + ax = _check_plot_works(series.plot.pie) + _check_text_labels(ax.texts, series.index) + assert ax.get_ylabel() == "YLABEL" + + def test_pie_series_no_label(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + ax = _check_plot_works(series.plot.pie, labels=None) + _check_text_labels(ax.texts, [""] * 5) + + def test_pie_series_less_colors_than_elements(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + color_args = ["r", "g", "b"] + ax = _check_plot_works(series.plot.pie, colors=color_args) + + color_expected = ["r", "g", "b", "r", "g"] + _check_colors(ax.patches, facecolors=color_expected) + + def test_pie_series_labels_and_colors(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + # with labels and colors + labels = ["A", "B", "C", "D", "E"] + color_args = ["r", "g", "b", "c", "m"] + ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args) + _check_text_labels(ax.texts, labels) + _check_colors(ax.patches, facecolors=color_args) + + def test_pie_series_autopct_and_fontsize(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + color_args = ["r", "g", "b", "c", "m"] + ax = _check_plot_works( + series.plot.pie, colors=color_args, autopct="%.2f", fontsize=7 + ) + pcts = [f"{s*100:.2f}" for s in series.values / series.sum()] + expected_texts = list(chain.from_iterable(zip(series.index, pcts))) + _check_text_labels(ax.texts, expected_texts) + for t in ax.texts: + assert t.get_fontsize() == 7 + + def test_pie_series_negative_raises(self): + # includes negative value + series = Series([1, 2, 0, 4, -1], index=["a", "b", "c", "d", "e"]) + with pytest.raises(ValueError, match="pie plot doesn't allow negative values"): + series.plot.pie() + + def test_pie_series_nan(self): + # includes nan + series = Series([1, 2, np.nan, 4], index=["a", "b", "c", "d"], name="YLABEL") + ax = _check_plot_works(series.plot.pie) + _check_text_labels(ax.texts, ["a", "b", "", "d"]) + + def test_pie_nan(self): + s = Series([1, np.nan, 1, 1]) + _, ax = mpl.pyplot.subplots() + ax = s.plot.pie(legend=True, ax=ax) + expected = ["0", "", "2", "3"] + result = [x.get_text() for x in ax.texts] + assert result == expected + + def test_df_series_secondary_legend(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + + # primary -> secondary (without passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + s.plot(legend=True, secondary_y=True, ax=ax) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax, labels=["a", "b", "c", "x (right)"]) + assert ax.get_yaxis().get_visible() + assert ax.right_ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_with_axes(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # primary -> secondary (with passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + s.plot(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on 
left ax + # left and right axis must be visible + _check_legend_labels(ax, labels=["a", "b", "c", "x (right)"]) + assert ax.get_yaxis().get_visible() + assert ax.right_ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_both(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # secondary -> secondary (without passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(secondary_y=True, ax=ax) + s.plot(legend=True, secondary_y=True, ax=ax) + # both legends are drawn on left ax + # left axis must be invisible and right axis must be visible + expected = ["a (right)", "b (right)", "c (right)", "x (right)"] + _check_legend_labels(ax.left_ax, labels=expected) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_both_with_axis(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # secondary -> secondary (with passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(secondary_y=True, ax=ax) + s.plot(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left axis must be invisible and right axis must be visible + expected = ["a (right)", "b (right)", "c (right)", "x (right)"] + _check_legend_labels(ax.left_ax, expected) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_both_with_axis_2(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # secondary -> secondary (with passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(secondary_y=True, mark_right=False, ax=ax) + s.plot(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left axis must be invisible and right axis must be visible + expected = ["a", "b", "c", "x (right)"] + _check_legend_labels(ax.left_ax, expected) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + @pytest.mark.parametrize( + "input_logy, expected_scale", [(True, "log"), ("sym", "symlog")] + ) + def test_secondary_logy(self, input_logy, expected_scale): + # GH 25545 + s1 = Series(np.random.default_rng(2).standard_normal(100)) + s2 = Series(np.random.default_rng(2).standard_normal(100)) + + # GH 24980 + ax1 = s1.plot(logy=input_logy) + ax2 = s2.plot(secondary_y=True, logy=input_logy) + + assert ax1.get_yscale() == expected_scale + assert ax2.get_yscale() == expected_scale + + def test_plot_fails_with_dupe_color_and_style(self): + x = Series(np.random.default_rng(2).standard_normal(2)) + _, ax = mpl.pyplot.subplots() + msg = ( + "Cannot pass 'style' string with a color symbol and 'color' keyword " + "argument. 
Please use one or the other or pass 'style' without a color " + "symbol" + ) + with pytest.raises(ValueError, match=msg): + x.plot(style="k--", color="k", ax=ax) + + @pytest.mark.parametrize( + "bw_method, ind", + [ + ["scott", 20], + [None, 20], + [None, np.int_(20)], + [0.5, np.linspace(-100, 100, 20)], + ], + ) + def test_kde_kwargs(self, ts, bw_method, ind): + pytest.importorskip("scipy") + _check_plot_works(ts.plot.kde, bw_method=bw_method, ind=ind) + + def test_density_kwargs(self, ts): + pytest.importorskip("scipy") + sample_points = np.linspace(-100, 100, 20) + _check_plot_works(ts.plot.density, bw_method=0.5, ind=sample_points) + + def test_kde_kwargs_check_axes(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + sample_points = np.linspace(-100, 100, 20) + ax = ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax) + _check_ax_scales(ax, yaxis="log") + _check_text_labels(ax.yaxis.get_label(), "Density") + + def test_kde_missing_vals(self): + pytest.importorskip("scipy") + s = Series(np.random.default_rng(2).uniform(size=50)) + s[0] = np.nan + axes = _check_plot_works(s.plot.kde) + + # gh-14821: check if the values have any missing values + assert any(~np.isnan(axes.lines[0].get_xdata())) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_boxplot_series(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.box(logy=True, ax=ax) + _check_ax_scales(ax, yaxis="log") + xlabels = ax.get_xticklabels() + _check_text_labels(xlabels, [ts.name]) + ylabels = ax.get_yticklabels() + _check_text_labels(ylabels, [""] * len(ylabels)) + + @pytest.mark.parametrize( + "kind", + plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds, + ) + def test_kind_kwarg(self, kind): + pytest.importorskip("scipy") + s = Series(range(3)) + _, ax = mpl.pyplot.subplots() + s.plot(kind=kind, ax=ax) + mpl.pyplot.close() + + @pytest.mark.parametrize( + "kind", + plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds, + ) + def test_kind_attr(self, kind): + pytest.importorskip("scipy") + s = Series(range(3)) + _, ax = mpl.pyplot.subplots() + getattr(s.plot, kind)() + mpl.pyplot.close() + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_invalid_plot_data(self, kind): + s = Series(list("abcd")) + _, ax = mpl.pyplot.subplots() + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + s.plot(kind=kind, ax=ax) + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_valid_object_plot(self, kind): + pytest.importorskip("scipy") + s = Series(range(10), dtype=object) + _check_plot_works(s.plot, kind=kind) + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_partially_invalid_plot_data(self, kind): + s = Series(["a", "b", 1.0, 2]) + _, ax = mpl.pyplot.subplots() + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + s.plot(kind=kind, ax=ax) + + def test_invalid_kind(self): + s = Series([1, 2]) + with pytest.raises(ValueError, match="invalid_kind is not a valid plot kind"): + s.plot(kind="invalid_kind") + + def test_dup_datetime_index_plot(self): + dr1 = date_range("1/1/2009", periods=4) + dr2 = date_range("1/2/2009", periods=4) + index = dr1.append(dr2) + values = np.random.default_rng(2).standard_normal(index.size) + s = Series(values, index=index) + _check_plot_works(s.plot) + + def test_errorbar_asymmetrical(self): + # GH9536 + s = Series(np.arange(10), name="x") + err = 
np.random.default_rng(2).random((2, 10)) + + ax = s.plot(yerr=err, xerr=err) + + result = np.vstack([i.vertices[:, 1] for i in ax.collections[1].get_paths()]) + expected = (err.T * np.array([-1, 1])) + s.to_numpy().reshape(-1, 1) + tm.assert_numpy_array_equal(result, expected) + + msg = ( + "Asymmetrical error bars should be provided " + f"with the shape \\(2, {len(s)}\\)" + ) + with pytest.raises(ValueError, match=msg): + s.plot(yerr=np.random.default_rng(2).random((2, 11))) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar"]) + @pytest.mark.parametrize( + "yerr", + [ + Series(np.abs(np.random.default_rng(2).standard_normal(10))), + np.abs(np.random.default_rng(2).standard_normal(10)), + list(np.abs(np.random.default_rng(2).standard_normal(10))), + DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 2))), + columns=["x", "y"], + ), + ], + ) + def test_errorbar_plot(self, kind, yerr): + s = Series(np.arange(10), name="x") + ax = _check_plot_works(s.plot, yerr=yerr, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + def test_errorbar_plot_yerr_0(self): + s = Series(np.arange(10), name="x") + s_err = np.abs(np.random.default_rng(2).standard_normal(10)) + ax = _check_plot_works(s.plot, xerr=s_err) + _check_has_errorbars(ax, xerr=1, yerr=0) + + @pytest.mark.slow + @pytest.mark.parametrize( + "yerr", + [ + Series(np.abs(np.random.default_rng(2).standard_normal(12))), + DataFrame( + np.abs(np.random.default_rng(2).standard_normal((12, 2))), + columns=["x", "y"], + ), + ], + ) + def test_errorbar_plot_ts(self, yerr): + # test time series plotting + ix = date_range("1/1/2000", "1/1/2001", freq="ME") + ts = Series(np.arange(12), index=ix, name="x") + yerr.index = ix + + ax = _check_plot_works(ts.plot, yerr=yerr) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + def test_errorbar_plot_invalid_yerr_shape(self): + s = Series(np.arange(10), name="x") + # check incorrect lengths and types + with tm.external_error_raised(ValueError): + s.plot(yerr=np.arange(11)) + + @pytest.mark.slow + def test_errorbar_plot_invalid_yerr(self): + s = Series(np.arange(10), name="x") + s_err = ["zzz"] * 10 + with tm.external_error_raised(TypeError): + s.plot(yerr=s_err) + + @pytest.mark.slow + def test_table_true(self, series): + _check_plot_works(series.plot, table=True) + + @pytest.mark.slow + def test_table_self(self, series): + _check_plot_works(series.plot, table=series) + + @pytest.mark.slow + def test_series_grid_settings(self): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + pytest.importorskip("scipy") + _check_grid_settings( + Series([1, 2, 3]), + plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds, + ) + + @pytest.mark.parametrize("c", ["r", "red", "green", "#FF0000"]) + def test_standard_colors(self, c): + from pandas.plotting._matplotlib.style import get_standard_colors + + result = get_standard_colors(1, color=c) + assert result == [c] + + result = get_standard_colors(1, color=[c]) + assert result == [c] + + result = get_standard_colors(3, color=c) + assert result == [c] * 3 + + result = get_standard_colors(3, color=[c]) + assert result == [c] * 3 + + def test_standard_colors_all(self): + from matplotlib import colors + + from pandas.plotting._matplotlib.style import get_standard_colors + + # multiple colors like mediumaquamarine + for c in colors.cnames: + result = get_standard_colors(num_colors=1, color=c) + assert result == [c] + + result = get_standard_colors(num_colors=1, 
color=[c])
+            assert result == [c]
+
+            result = get_standard_colors(num_colors=3, color=c)
+            assert result == [c] * 3
+
+            result = get_standard_colors(num_colors=3, color=[c])
+            assert result == [c] * 3
+
+        # single letter colors like k
+        for c in colors.ColorConverter.colors:
+            result = get_standard_colors(num_colors=1, color=c)
+            assert result == [c]
+
+            result = get_standard_colors(num_colors=1, color=[c])
+            assert result == [c]
+
+            result = get_standard_colors(num_colors=3, color=c)
+            assert result == [c] * 3
+
+            result = get_standard_colors(num_colors=3, color=[c])
+            assert result == [c] * 3
+
+    def test_series_plot_color_kwargs(self):
+        # GH1890
+        _, ax = mpl.pyplot.subplots()
+        ax = Series(np.arange(12) + 1).plot(color="green", ax=ax)
+        _check_colors(ax.get_lines(), linecolors=["green"])
+
+    def test_time_series_plot_color_kwargs(self):
+        # #1890
+        _, ax = mpl.pyplot.subplots()
+        ax = Series(np.arange(12) + 1, index=date_range("1/1/2000", periods=12)).plot(
+            color="green", ax=ax
+        )
+        _check_colors(ax.get_lines(), linecolors=["green"])
+
+    def test_time_series_plot_color_with_empty_kwargs(self):
+        import matplotlib as mpl
+
+        def_colors = _unpack_cycler(mpl.rcParams)
+        index = date_range("1/1/2000", periods=12)
+        s = Series(np.arange(1, 13), index=index)
+
+        ncolors = 3
+
+        _, ax = mpl.pyplot.subplots()
+        for i in range(ncolors):
+            ax = s.plot(ax=ax)
+        _check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
+
+    def test_xticklabels(self):
+        # GH11529
+        s = Series(np.arange(10), index=[f"P{i:02d}" for i in range(10)])
+        _, ax = mpl.pyplot.subplots()
+        ax = s.plot(xticks=[0, 3, 5, 9], ax=ax)
+        exp = [f"P{i:02d}" for i in [0, 3, 5, 9]]
+        _check_text_labels(ax.get_xticklabels(), exp)
+
+    def test_xtick_barPlot(self):
+        # GH28172
+        s = Series(range(10), index=[f"P{i:02d}" for i in range(10)])
+        ax = s.plot.bar(xticks=range(0, 11, 2))
+        exp = np.array(list(range(0, 11, 2)))
+        tm.assert_numpy_array_equal(exp, ax.get_xticks())
+
+    def test_custom_business_day_freq(self):
+        # GH7222
+        from pandas.tseries.offsets import CustomBusinessDay
+
+        s = Series(
+            range(100, 121),
+            index=pd.bdate_range(
+                start="2014-05-01",
+                end="2014-06-01",
+                freq=CustomBusinessDay(holidays=["2014-05-26"]),
+            ),
+        )
+
+        _check_plot_works(s.plot)
+
+    @pytest.mark.xfail(
+        reason="GH#24426, see also "
+        "github.com/pandas-dev/pandas/commit/"
+        "ef1bd69fa42bbed5d09dd17f08c44fc8bfc2b685#r61470674"
+    )
+    def test_plot_accessor_updates_on_inplace(self):
+        ser = Series([1, 2, 3, 4])
+        _, ax = mpl.pyplot.subplots()
+        ax = ser.plot(ax=ax)
+        before = ax.xaxis.get_ticklocs()
+
+        ser.drop([0, 1], inplace=True)
+        _, ax = mpl.pyplot.subplots()
+        after = ax.xaxis.get_ticklocs()
+        tm.assert_numpy_array_equal(before, after)
+
+    @pytest.mark.parametrize("kind", ["line", "area"])
+    def test_plot_xlim_for_series(self, kind):
+        # test if xlim is also correctly plotted in Series for line and area
+        # GH 27686
+        s = Series([2, 3])
+        _, ax = mpl.pyplot.subplots()
+        s.plot(kind=kind, ax=ax)
+        xlims = ax.get_xlim()
+
+        assert xlims[0] < 0
+        assert xlims[1] > 1
+
+    def test_plot_no_rows(self):
+        # GH 27758
+        df = Series(dtype=int)
+        assert df.empty
+        ax = df.plot()
+        assert len(ax.get_lines()) == 1
+        line = ax.get_lines()[0]
+        assert len(line.get_xdata()) == 0
+        assert len(line.get_ydata()) == 0
+
+    def test_plot_no_numeric_data(self):
+        df = Series(["a", "b", "c"])
+        with pytest.raises(TypeError, match="no numeric data to plot"):
+            df.plot()
+
+    @pytest.mark.parametrize(
+        "data, index",
+        [
+            ([1, 2, 3, 4], [3, 2, 1, 0]),
+            ([10, 50, 20, 30], [1910, 1920, 1980, 1950]),
+        ],
+    )
+    def test_plot_order(self, data, index):
+        # GH38865 Verify plot order of a Series
+        ser = Series(data=data, index=index)
+        ax = ser.plot(kind="bar")
+
+        expected = ser.tolist()
+        result = [
+            patch.get_bbox().ymax
+            for patch in sorted(ax.patches, key=lambda patch: patch.get_bbox().xmax)
+        ]
+        assert expected == result
+
+    def test_style_single_ok(self):
+        s = Series([1, 2])
+        ax = s.plot(style="s", color="C3")
+        assert ax.lines[0].get_color() == "C3"
+
+    @pytest.mark.parametrize(
+        "index_name, old_label, new_label",
+        [(None, "", "new"), ("old", "old", "new"), (None, "", "")],
+    )
+    @pytest.mark.parametrize("kind", ["line", "area", "bar", "barh", "hist"])
+    def test_xlabel_ylabel_series(self, kind, index_name, old_label, new_label):
+        # GH 9093
+        ser = Series([1, 2, 3, 4])
+        ser.index.name = index_name
+
+        # default is the ylabel is not shown and xlabel is index name (reverse for barh)
+        ax = ser.plot(kind=kind)
+        if kind == "barh":
+            assert ax.get_xlabel() == ""
+            assert ax.get_ylabel() == old_label
+        elif kind == "hist":
+            assert ax.get_xlabel() == ""
+            assert ax.get_ylabel() == "Frequency"
+        else:
+            assert ax.get_ylabel() == ""
+            assert ax.get_xlabel() == old_label
+
+        # old xlabel will be overridden and assigned ylabel will be used as ylabel
+        ax = ser.plot(kind=kind, ylabel=new_label, xlabel=new_label)
+        assert ax.get_ylabel() == new_label
+        assert ax.get_xlabel() == new_label
+
+    @pytest.mark.parametrize(
+        "index",
+        [
+            pd.timedelta_range(start=0, periods=2, freq="D"),
+            [pd.Timedelta(days=1), pd.Timedelta(days=2)],
+        ],
+    )
+    def test_timedelta_index(self, index):
+        # GH37454
+        xlims = (3, 1)
+        ax = Series([1, 2], index=index).plot(xlim=(xlims))
+        assert ax.get_xlim() == (3, 1)
+
+    def test_series_none_color(self):
+        # GH51953
+        series = Series([1, 2, 3])
+        ax = series.plot(color=None)
+        expected = _unpack_cycler(mpl.pyplot.rcParams)[:1]
+        _check_colors(ax.get_lines(), linecolors=expected)
+
+    @pytest.mark.slow
+    def test_plot_no_warning(self, ts):
+        # GH 55138
+        # TODO(3.0): this can be removed once Period[B] deprecation is enforced
+        with tm.assert_produces_warning(False):
+            _ = ts.plot()
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_style.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_style.py
new file mode 100644
index 0000000000000000000000000000000000000000..665bda15724fd67dc9917509d2b95957b03107e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/test_style.py
@@ -0,0 +1,157 @@
+import pytest
+
+from pandas import Series
+
+pytest.importorskip("matplotlib")
+from pandas.plotting._matplotlib.style import get_standard_colors
+
+
+class TestGetStandardColors:
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (3, ["red", "green", "blue"]),
+            (5, ["red", "green", "blue", "red", "green"]),
+            (7, ["red", "green", "blue", "red", "green", "blue", "red"]),
+            (2, ["red", "green"]),
+            (1, ["red"]),
+        ],
+    )
+    def test_default_colors_named_from_prop_cycle(self, num_colors, expected):
+        import matplotlib as mpl
+        from matplotlib.pyplot import cycler
+
+        mpl_params = {
+            "axes.prop_cycle": cycler(color=["red", "green", "blue"]),
+        }
+        with mpl.rc_context(rc=mpl_params):
+            result = get_standard_colors(num_colors=num_colors)
+            assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (1, ["b"]),
+            (3, ["b", "g", "r"]),
+            (4, ["b", "g", "r", "y"]),
+            (5, ["b", "g", "r", "y", "b"]),
+            (7, ["b", "g", "r", "y", "b", "g", "r"]),
+        ],
+    )
+    def test_default_colors_named_from_prop_cycle_string(self, num_colors, expected):
+        import matplotlib as mpl
+        from matplotlib.pyplot import cycler
+
+        mpl_params = {
+            "axes.prop_cycle": cycler(color="bgry"),
+        }
+        with mpl.rc_context(rc=mpl_params):
+            result = get_standard_colors(num_colors=num_colors)
+            assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected_name",
+        [
+            (1, ["C0"]),
+            (3, ["C0", "C1", "C2"]),
+            (
+                12,
+                [
+                    "C0",
+                    "C1",
+                    "C2",
+                    "C3",
+                    "C4",
+                    "C5",
+                    "C6",
+                    "C7",
+                    "C8",
+                    "C9",
+                    "C0",
+                    "C1",
+                ],
+            ),
+        ],
+    )
+    def test_default_colors_named_undefined_prop_cycle(self, num_colors, expected_name):
+        import matplotlib as mpl
+        import matplotlib.colors as mcolors
+
+        with mpl.rc_context(rc={}):
+            expected = [mcolors.to_hex(x) for x in expected_name]
+            result = get_standard_colors(num_colors=num_colors)
+            assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (1, ["red", "green", (0.1, 0.2, 0.3)]),
+            (2, ["red", "green", (0.1, 0.2, 0.3)]),
+            (3, ["red", "green", (0.1, 0.2, 0.3)]),
+            (4, ["red", "green", (0.1, 0.2, 0.3), "red"]),
+        ],
+    )
+    def test_user_input_color_sequence(self, num_colors, expected):
+        color = ["red", "green", (0.1, 0.2, 0.3)]
+        result = get_standard_colors(color=color, num_colors=num_colors)
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (1, ["r", "g", "b", "k"]),
+            (2, ["r", "g", "b", "k"]),
+            (3, ["r", "g", "b", "k"]),
+            (4, ["r", "g", "b", "k"]),
+            (5, ["r", "g", "b", "k", "r"]),
+            (6, ["r", "g", "b", "k", "r", "g"]),
+        ],
+    )
+    def test_user_input_color_string(self, num_colors, expected):
+        color = "rgbk"
+        result = get_standard_colors(color=color, num_colors=num_colors)
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (1, [(0.1, 0.2, 0.3)]),
+            (2, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]),
+            (3, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]),
+        ],
+    )
+    def test_user_input_color_floats(self, num_colors, expected):
+        color = (0.1, 0.2, 0.3)
+        result = get_standard_colors(color=color, num_colors=num_colors)
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "color, num_colors, expected",
+        [
+            ("Crimson", 1, ["Crimson"]),
+            ("DodgerBlue", 2, ["DodgerBlue", "DodgerBlue"]),
+            ("firebrick", 3, ["firebrick", "firebrick", "firebrick"]),
+        ],
+    )
+    def test_user_input_named_color_string(self, color, num_colors, expected):
+        result = get_standard_colors(color=color, num_colors=num_colors)
+        assert result == expected
+
+    @pytest.mark.parametrize("color", ["", [], (), Series([], dtype="object")])
+    def test_empty_color_raises(self, color):
+        with pytest.raises(ValueError, match="Invalid color argument"):
+            get_standard_colors(color=color, num_colors=1)
+
+    @pytest.mark.parametrize(
+        "color",
+        [
+            "bad_color",
+            ("red", "green", "bad_color"),
+            (0.1,),
+            (0.1, 0.2),
+            (0.1, 0.2, 0.3, 0.4, 0.5),  # must be either 3 or 4 floats
+        ],
+    )
+    def test_bad_color_raises(self, color):
+        with pytest.raises(ValueError, match="Invalid color"):
+            get_standard_colors(color=color, num_colors=5)