diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_constructors.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..f3ac60f672ee1221a9b1b43faf7c2e023d4b9d3b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_constructors.py @@ -0,0 +1,179 @@ +from datetime import datetime +import sys + +import numpy as np +import pytest + +from pandas.compat import PYPY + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm +from pandas.core.accessor import PandasDelegate +from pandas.core.base import ( + NoNewAttributesMixin, + PandasObject, +) + + +def series_via_frame_from_dict(x, **kwargs): + return DataFrame({"a": x}, **kwargs)["a"] + + +def series_via_frame_from_scalar(x, **kwargs): + return DataFrame(x, **kwargs)[0] + + +@pytest.fixture( + params=[ + Series, + series_via_frame_from_dict, + series_via_frame_from_scalar, + Index, + ], + ids=["Series", "DataFrame-dict", "DataFrame-array", "Index"], +) +def constructor(request): + return request.param + + +class TestPandasDelegate: + class Delegator: + _properties = ["prop"] + _methods = ["test_method"] + + def _set_prop(self, value): + self.prop = value + + def _get_prop(self): + return self.prop + + prop = property(_get_prop, _set_prop, doc="foo property") + + def test_method(self, *args, **kwargs): + """a test method""" + + class Delegate(PandasDelegate, PandasObject): + def __init__(self, obj) -> None: + self.obj = obj + + def test_invalid_delegation(self): + # these show that in order for the delegation to work + # the _delegate_* methods need to be overridden to not raise + # a TypeError + + self.Delegate._add_delegate_accessors( + delegate=self.Delegator, + accessors=self.Delegator._properties, + typ="property", + ) + self.Delegate._add_delegate_accessors( + delegate=self.Delegator, accessors=self.Delegator._methods, typ="method" + ) + + delegate = self.Delegate(self.Delegator()) + + msg = "You cannot access the property prop" + with pytest.raises(TypeError, match=msg): + delegate.prop + + msg = "The property prop cannot be set" + with pytest.raises(TypeError, match=msg): + delegate.prop = 5 + + msg = "You cannot access the property prop" + with pytest.raises(TypeError, match=msg): + delegate.prop + + @pytest.mark.skipif(PYPY, reason="not relevant for PyPy") + def test_memory_usage(self): + # Delegate does not implement memory_usage. 
+ # Check that we fall back to in-built `__sizeof__` + # GH 12924 + delegate = self.Delegate(self.Delegator()) + sys.getsizeof(delegate) + + +class TestNoNewAttributesMixin: + def test_mixin(self): + class T(NoNewAttributesMixin): + pass + + t = T() + assert not hasattr(t, "__frozen") + + t.a = "test" + assert t.a == "test" + + t._freeze() + assert "__frozen" in dir(t) + assert getattr(t, "__frozen") + msg = "You cannot add any new attribute" + with pytest.raises(AttributeError, match=msg): + t.b = "test" + + assert not hasattr(t, "b") + + +class TestConstruction: + # test certain constructor behaviours on dtype inference across Series, + # Index and DataFrame + + @pytest.mark.parametrize( + "a", + [ + np.array(["2263-01-01"], dtype="datetime64[D]"), + np.array([datetime(2263, 1, 1)], dtype=object), + np.array([np.datetime64("2263-01-01", "D")], dtype=object), + np.array(["2263-01-01"], dtype=object), + ], + ids=[ + "datetime64[D]", + "object-datetime.datetime", + "object-numpy-scalar", + "object-string", + ], + ) + def test_constructor_datetime_outofbound( + self, a, constructor, request, using_infer_string + ): + # GH-26853 (+ bug GH-26206 out of bound non-ns unit) + + # No dtype specified (dtype inference) + # datetime64[non-ns] raise error, other cases result in object dtype + # and preserve original data + if a.dtype.kind == "M": + # Can't fit in nanosecond bounds -> get the nearest supported unit + result = constructor(a) + assert result.dtype == "M8[s]" + else: + result = constructor(a) + if using_infer_string and "object-string" in request.node.callspec.id: + assert result.dtype == "string" + else: + assert result.dtype == "object" + tm.assert_numpy_array_equal(result.to_numpy(), a) + + # Explicit dtype specified + # Forced conversion fails for all -> all cases raise error + msg = "Out of bounds|Out of bounds .* present at position 0" + with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg): + constructor(a, dtype="datetime64[ns]") + + def test_constructor_datetime_nonns(self, constructor): + arr = np.array(["2020-01-01T00:00:00.000000"], dtype="datetime64[us]") + dta = pd.core.arrays.DatetimeArray._simple_new(arr, dtype=arr.dtype) + expected = constructor(dta) + assert expected.dtype == arr.dtype + + result = constructor(arr) + tm.assert_equal(result, expected) + + # https://github.com/pandas-dev/pandas/issues/34843 + arr.flags.writeable = False + result = constructor(arr) + tm.assert_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py new file mode 100644 index 0000000000000000000000000000000000000000..246f33d27476cb419620fb8571984619785f9b62 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py @@ -0,0 +1,56 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalDtype, + DataFrame, +) +import pandas._testing as tm + + +def test_transpose(index_or_series_obj): + obj = index_or_series_obj + tm.assert_equal(obj.transpose(), obj) + + +def test_transpose_non_default_axes(index_or_series_obj): + msg = "the 'axes' parameter is not supported" + obj = index_or_series_obj + with pytest.raises(ValueError, match=msg): + obj.transpose(1) + with pytest.raises(ValueError, match=msg): + obj.transpose(axes=1) + + +def test_numpy_transpose(index_or_series_obj): + msg = "the 'axes' parameter is not supported" + obj = index_or_series_obj + tm.assert_equal(np.transpose(obj), obj) + + with 
pytest.raises(ValueError, match=msg): + np.transpose(obj, axes=1) + + +@pytest.mark.parametrize( + "data, transposed_data, index, columns, dtype", + [ + ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], int), + ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], CategoricalDtype([1, 2])), + ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], int), + ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], CategoricalDtype([1, 2])), + ([[1, 2], [3, 4]], [[1, 3], [2, 4]], ["a", "a"], ["b", "b"], int), + ( + [[1, 2], [3, 4]], + [[1, 3], [2, 4]], + ["a", "a"], + ["b", "b"], + CategoricalDtype([1, 2, 3, 4]), + ), + ], +) +def test_duplicate_labels(data, transposed_data, index, columns, dtype): + # GH 42380 + df = DataFrame(data, index=index, columns=columns, dtype=dtype) + result = df.T + expected = DataFrame(transposed_data, index=columns, columns=index, dtype=dtype) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_unique.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_unique.py new file mode 100644 index 0000000000000000000000000000000000000000..d3fe144f70cfc2b54d978ab80ba23ef896948b9c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_unique.py @@ -0,0 +1,124 @@ +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +import pandas as pd +import pandas._testing as tm +from pandas.tests.base.common import allow_na_ops + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_unique(index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + result = obj.unique() + + # dict.fromkeys preserves the order + unique_values = list(dict.fromkeys(obj.values)) + if isinstance(obj, pd.MultiIndex): + expected = pd.MultiIndex.from_tuples(unique_values) + expected.names = obj.names + tm.assert_index_equal(result, expected, exact=True) + elif isinstance(obj, pd.Index): + expected = pd.Index(unique_values, dtype=obj.dtype) + if isinstance(obj.dtype, pd.DatetimeTZDtype): + expected = expected.normalize() + tm.assert_index_equal(result, expected, exact=True) + else: + expected = np.array(unique_values) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +@pytest.mark.parametrize("null_obj", [np.nan, None]) +def test_unique_null(null_obj, index_or_series_obj): + obj = index_or_series_obj + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(obj, pd.MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + result = obj.unique() + + unique_values_raw = dict.fromkeys(obj.values) + # because np.nan == np.nan is False, but None == None is True + # np.nan would be duplicated, whereas None wouldn't + unique_values_not_null = [val for val in unique_values_raw if not pd.isnull(val)] + unique_values = [null_obj] + unique_values_not_null + + if isinstance(obj, pd.Index): + expected = pd.Index(unique_values, dtype=obj.dtype) + if isinstance(obj.dtype, pd.DatetimeTZDtype): + result = result.normalize() + expected = expected.normalize() + tm.assert_index_equal(result, expected, exact=True) + else: + expected = np.array(unique_values, 
dtype=obj.dtype) + tm.assert_numpy_array_equal(result, expected) + + +def test_nunique(index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + expected = len(obj.unique()) + assert obj.nunique(dropna=False) == expected + + +@pytest.mark.parametrize("null_obj", [np.nan, None]) +def test_nunique_null(null_obj, index_or_series_obj): + obj = index_or_series_obj + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif isinstance(obj, pd.MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + + if isinstance(obj, pd.CategoricalIndex): + assert obj.nunique() == len(obj.categories) + assert obj.nunique(dropna=False) == len(obj.categories) + 1 + else: + num_unique_values = len(obj.unique()) + assert obj.nunique() == max(0, num_unique_values - 1) + assert obj.nunique(dropna=False) == max(0, num_unique_values) + + +@pytest.mark.single_cpu +@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="decoding fails") +def test_unique_bad_unicode(index_or_series): + # regression test for #34550 + uval = "\ud83d" # smiley emoji + + obj = index_or_series([uval] * 2) + result = obj.unique() + + if isinstance(obj, pd.Index): + expected = pd.Index(["\ud83d"], dtype=object) + tm.assert_index_equal(result, expected, exact=True) + else: + expected = np.array(["\ud83d"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_nunique_dropna(dropna): + # GH37566 + ser = pd.Series(["yes", "yes", pd.NA, np.nan, None, pd.NaT]) + res = ser.nunique(dropna) + assert res == 1 if dropna else 5 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_value_counts.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_value_counts.py new file mode 100644 index 0000000000000000000000000000000000000000..27296663988774ff01af6497060b63019c7deeb9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_value_counts.py @@ -0,0 +1,356 @@ +import collections +from datetime import timedelta + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, + Interval, + IntervalIndex, + MultiIndex, + Series, + Timedelta, + TimedeltaIndex, + array, +) +import pandas._testing as tm +from pandas.tests.base.common import allow_na_ops + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_value_counts(index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + result = obj.value_counts() + + counter = collections.Counter(obj) + expected = Series(dict(counter.most_common()), dtype=np.int64, name="count") + + if obj.dtype != np.float16: + expected.index = expected.index.astype(obj.dtype) + else: + with pytest.raises(NotImplementedError, match="float16 indexes are not "): + expected.index.astype(obj.dtype) + return + if isinstance(expected.index, MultiIndex): + expected.index.names = obj.names + else: + expected.index.name = obj.name + + if not isinstance(result.dtype, np.dtype): + if getattr(obj.dtype, "storage", "") == "pyarrow": + expected = expected.astype("int64[pyarrow]") + else: + # i.e IntegerDtype + expected = expected.astype("Int64") + + # TODO(GH#32514): Order of entries with the same count is inconsistent + # 
on CI (gh-32449) + if obj.duplicated().any(): + result = result.sort_index() + expected = expected.sort_index() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("null_obj", [np.nan, None]) +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_value_counts_null(null_obj, index_or_series_obj): + orig = index_or_series_obj + obj = orig.copy() + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(orig, MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + + # because np.nan == np.nan is False, but None == None is True + # np.nan would be duplicated, whereas None wouldn't + counter = collections.Counter(obj.dropna()) + expected = Series(dict(counter.most_common()), dtype=np.int64, name="count") + + if obj.dtype != np.float16: + expected.index = expected.index.astype(obj.dtype) + else: + with pytest.raises(NotImplementedError, match="float16 indexes are not "): + expected.index.astype(obj.dtype) + return + expected.index.name = obj.name + + result = obj.value_counts() + if obj.duplicated().any(): + # TODO(GH#32514): + # Order of entries with the same count is inconsistent on CI (gh-32449) + expected = expected.sort_index() + result = result.sort_index() + + if not isinstance(result.dtype, np.dtype): + if getattr(obj.dtype, "storage", "") == "pyarrow": + expected = expected.astype("int64[pyarrow]") + else: + # i.e IntegerDtype + expected = expected.astype("Int64") + tm.assert_series_equal(result, expected) + + expected[null_obj] = 3 + + result = obj.value_counts(dropna=False) + if obj.duplicated().any(): + # TODO(GH#32514): + # Order of entries with the same count is inconsistent on CI (gh-32449) + expected = expected.sort_index() + result = result.sort_index() + tm.assert_series_equal(result, expected) + + +def test_value_counts_inferred(index_or_series, using_infer_string): + klass = index_or_series + s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"] + s = klass(s_values) + expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"], name="count") + tm.assert_series_equal(s.value_counts(), expected) + + if isinstance(s, Index): + exp = Index(np.unique(np.array(s_values, dtype=np.object_))) + tm.assert_index_equal(s.unique(), exp) + else: + exp = np.unique(np.array(s_values, dtype=np.object_)) + if using_infer_string: + exp = array(exp) + tm.assert_equal(s.unique(), exp) + + assert s.nunique() == 4 + # don't sort, have to sort after the fact as not sorting is + # platform-dep + hist = s.value_counts(sort=False).sort_values() + expected = Series([3, 1, 4, 2], index=list("acbd"), name="count").sort_values() + tm.assert_series_equal(hist, expected) + + # sort ascending + hist = s.value_counts(ascending=True) + expected = Series([1, 2, 3, 4], index=list("cdab"), name="count") + tm.assert_series_equal(hist, expected) + + # relative histogram. 
+ hist = s.value_counts(normalize=True) + expected = Series( + [0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"], name="proportion" + ) + tm.assert_series_equal(hist, expected) + + +def test_value_counts_bins(index_or_series, using_infer_string): + klass = index_or_series + s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"] + s = klass(s_values) + + # bins + msg = "bins argument only works with numeric data" + with pytest.raises(TypeError, match=msg): + s.value_counts(bins=1) + + s1 = Series([1, 1, 2, 3]) + res1 = s1.value_counts(bins=1) + exp1 = Series({Interval(0.997, 3.0): 4}, name="count") + tm.assert_series_equal(res1, exp1) + res1n = s1.value_counts(bins=1, normalize=True) + exp1n = Series({Interval(0.997, 3.0): 1.0}, name="proportion") + tm.assert_series_equal(res1n, exp1n) + + if isinstance(s1, Index): + tm.assert_index_equal(s1.unique(), Index([1, 2, 3])) + else: + exp = np.array([1, 2, 3], dtype=np.int64) + tm.assert_numpy_array_equal(s1.unique(), exp) + + assert s1.nunique() == 3 + + # these return the same + res4 = s1.value_counts(bins=4, dropna=True) + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]), name="count") + tm.assert_series_equal(res4, exp4) + + res4 = s1.value_counts(bins=4, dropna=False) + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]), name="count") + tm.assert_series_equal(res4, exp4) + + res4n = s1.value_counts(bins=4, normalize=True) + exp4n = Series( + [0.5, 0.25, 0.25, 0], index=intervals.take([0, 1, 3, 2]), name="proportion" + ) + tm.assert_series_equal(res4n, exp4n) + + # handle NA's properly + s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"] + s = klass(s_values) + expected = Series([4, 3, 2], index=["b", "a", "d"], name="count") + tm.assert_series_equal(s.value_counts(), expected) + + if isinstance(s, Index): + exp = Index(["a", "b", np.nan, "d"]) + tm.assert_index_equal(s.unique(), exp) + else: + exp = np.array(["a", "b", np.nan, "d"], dtype=object) + if using_infer_string: + exp = array(exp) + tm.assert_equal(s.unique(), exp) + assert s.nunique() == 3 + + s = klass({}) if klass is dict else klass({}, dtype=object) + expected = Series([], dtype=np.int64, name="count") + tm.assert_series_equal(s.value_counts(), expected, check_index_type=False) + # returned dtype differs depending on original + if isinstance(s, Index): + tm.assert_index_equal(s.unique(), Index([]), exact=False) + else: + tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False) + + assert s.nunique() == 0 + + +def test_value_counts_datetime64(index_or_series, unit): + klass = index_or_series + + # GH 3002, datetime64[ns] + # don't test names though + df = pd.DataFrame( + { + "person_id": ["xxyyzz", "xxyyzz", "xxyyzz", "xxyyww", "foofoo", "foofoo"], + "dt": pd.to_datetime( + [ + "2010-01-01", + "2010-01-01", + "2010-01-01", + "2009-01-01", + "2008-09-09", + "2008-09-09", + ] + ).as_unit(unit), + "food": ["PIE", "GUM", "EGG", "EGG", "PIE", "GUM"], + } + ) + + s = klass(df["dt"].copy()) + s.name = None + idx = pd.to_datetime( + ["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"] + ).as_unit(unit) + expected_s = Series([3, 2, 1], index=idx, name="count") + tm.assert_series_equal(s.value_counts(), expected_s) + + expected = array( + np.array( + ["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"], + dtype=f"datetime64[{unit}]", + ) + ) + result = 
s.unique() + if isinstance(s, Index): + tm.assert_index_equal(result, DatetimeIndex(expected)) + else: + tm.assert_extension_array_equal(result, expected) + + assert s.nunique() == 3 + + # with NaT + s = df["dt"].copy() + s = klass(list(s.values) + [pd.NaT] * 4) + if klass is Series: + s = s.dt.as_unit(unit) + else: + s = s.as_unit(unit) + + result = s.value_counts() + assert result.index.dtype == f"datetime64[{unit}]" + tm.assert_series_equal(result, expected_s) + + result = s.value_counts(dropna=False) + expected_s = pd.concat( + [ + Series([4], index=DatetimeIndex([pd.NaT]).as_unit(unit), name="count"), + expected_s, + ] + ) + tm.assert_series_equal(result, expected_s) + + assert s.dtype == f"datetime64[{unit}]" + unique = s.unique() + assert unique.dtype == f"datetime64[{unit}]" + + # numpy_array_equal cannot compare pd.NaT + if isinstance(s, Index): + exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT]).as_unit(unit) + tm.assert_index_equal(unique, exp_idx) + else: + tm.assert_extension_array_equal(unique[:3], expected) + assert pd.isna(unique[3]) + + assert s.nunique() == 3 + assert s.nunique(dropna=False) == 4 + + +def test_value_counts_timedelta64(index_or_series, unit): + # timedelta64[ns] + klass = index_or_series + + day = Timedelta(timedelta(1)).as_unit(unit) + tdi = TimedeltaIndex([day], name="dt").as_unit(unit) + + tdvals = np.zeros(6, dtype=f"m8[{unit}]") + day + td = klass(tdvals, name="dt") + + result = td.value_counts() + expected_s = Series([6], index=tdi, name="count") + tm.assert_series_equal(result, expected_s) + + expected = tdi + result = td.unique() + if isinstance(td, Index): + tm.assert_index_equal(result, expected) + else: + tm.assert_extension_array_equal(result, expected._values) + + td2 = day + np.zeros(6, dtype=f"m8[{unit}]") + td2 = klass(td2, name="dt") + result2 = td2.value_counts() + tm.assert_series_equal(result2, expected_s) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_value_counts_with_nan(dropna, index_or_series): + # GH31944 + klass = index_or_series + values = [True, pd.NA, np.nan] + obj = klass(values) + res = obj.value_counts(dropna=dropna) + if dropna is True: + expected = Series([1], index=Index([True], dtype=obj.dtype), name="count") + else: + expected = Series([1, 1, 1], index=[True, pd.NA, np.nan], name="count") + tm.assert_series_equal(res, expected) + + +def test_value_counts_object_inference_deprecated(): + # GH#56161 + dti = pd.date_range("2016-01-01", periods=3, tz="UTC") + + idx = dti.astype(object) + msg = "The behavior of value_counts with object-dtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = idx.value_counts() + + exp = dti.value_counts() + tm.assert_series_equal(res, exp) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74ba780dfa693f06eab46b38249b2fadeb9fbbfa Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a56b9a9be9de4175984c305e2bbad95a706bdea
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c2b695f17637cbdce0ea7d44dec7797f16b2c915
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b6aa85aa5e9a7fd010a2d1d6fedd714a96d545ea
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa1a5f45e86b0e6662f5c70fefd0918fb8500c65
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3932fd13b981854bd02de0b5337654b947eb388
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_series.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_series.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62f4c52e219f9bcb55eb78761c977eceff15bea9
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_series.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0dfbeea5bbcf5996ecf69c88f2ba870b1184dd47
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_duplicate_labels.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_duplicate_labels.py
new file mode 100644
index 0000000000000000000000000000000000000000..f54db07824daf15eb01c32490495deff3736b14d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_duplicate_labels.py @@ -0,0 +1,413 @@ +"""Tests dealing with the NDFrame.allows_duplicates.""" +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +not_implemented = pytest.mark.xfail(reason="Not implemented.") + +# ---------------------------------------------------------------------------- +# Preservation + + +class TestPreserves: + @pytest.mark.parametrize( + "cls, data", + [ + (pd.Series, np.array([])), + (pd.Series, [1, 2]), + (pd.DataFrame, {}), + (pd.DataFrame, {"A": [1, 2]}), + ], + ) + def test_construction_ok(self, cls, data): + result = cls(data) + assert result.flags.allows_duplicate_labels is True + + result = cls(data).set_flags(allows_duplicate_labels=False) + assert result.flags.allows_duplicate_labels is False + + @pytest.mark.parametrize( + "func", + [ + operator.itemgetter(["a"]), + operator.methodcaller("add", 1), + operator.methodcaller("rename", str.upper), + operator.methodcaller("rename", "name"), + operator.methodcaller("abs"), + np.abs, + ], + ) + def test_preserved_series(self, func): + s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + assert func(s).flags.allows_duplicate_labels is False + + @pytest.mark.parametrize( + "other", [pd.Series(0, index=["a", "b", "c"]), pd.Series(0, index=["a", "b"])] + ) + # TODO: frame + @not_implemented + def test_align(self, other): + s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + a, b = s.align(other) + assert a.flags.allows_duplicate_labels is False + assert b.flags.allows_duplicate_labels is False + + def test_preserved_frame(self): + df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + assert df.loc[["a"]].flags.allows_duplicate_labels is False + assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False + + def test_to_frame(self): + ser = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False) + assert ser.to_frame().flags.allows_duplicate_labels is False + + @pytest.mark.parametrize("func", ["add", "sub"]) + @pytest.mark.parametrize("frame", [False, True]) + @pytest.mark.parametrize("other", [1, pd.Series([1, 2], name="A")]) + def test_binops(self, func, other, frame): + df = pd.Series([1, 2], name="A", index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + if frame: + df = df.to_frame() + if isinstance(other, pd.Series) and frame: + other = other.to_frame() + func = operator.methodcaller(func, other) + assert df.flags.allows_duplicate_labels is False + assert func(df).flags.allows_duplicate_labels is False + + def test_preserve_getitem(self): + df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) + assert df[["A"]].flags.allows_duplicate_labels is False + assert df["A"].flags.allows_duplicate_labels is False + assert df.loc[0].flags.allows_duplicate_labels is False + assert df.loc[[0]].flags.allows_duplicate_labels is False + assert df.loc[0, ["A"]].flags.allows_duplicate_labels is False + + def test_ndframe_getitem_caching_issue( + self, request, using_copy_on_write, warn_copy_on_write + ): + if not (using_copy_on_write or warn_copy_on_write): + request.applymarker(pytest.mark.xfail(reason="Unclear behavior.")) + # NDFrame.__getitem__ will cache the first df['A']. May need to + # invalidate that cache? 
Update the cached entries? + df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False) + assert df["A"].flags.allows_duplicate_labels is False + df.flags.allows_duplicate_labels = True + assert df["A"].flags.allows_duplicate_labels is True + + @pytest.mark.parametrize( + "objs, kwargs", + [ + # Series + ( + [ + pd.Series(1, index=["a", "b"]), + pd.Series(2, index=["c", "d"]), + ], + {}, + ), + ( + [ + pd.Series(1, index=["a", "b"]), + pd.Series(2, index=["a", "b"]), + ], + {"ignore_index": True}, + ), + ( + [ + pd.Series(1, index=["a", "b"]), + pd.Series(2, index=["a", "b"]), + ], + {"axis": 1}, + ), + # Frame + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.DataFrame({"A": [1, 2]}, index=["c", "d"]), + ], + {}, + ), + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + ], + {"ignore_index": True}, + ), + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.DataFrame({"B": [1, 2]}, index=["a", "b"]), + ], + {"axis": 1}, + ), + # Series / Frame + ( + [ + pd.DataFrame({"A": [1, 2]}, index=["a", "b"]), + pd.Series([1, 2], index=["a", "b"], name="B"), + ], + {"axis": 1}, + ), + ], + ) + def test_concat(self, objs, kwargs): + objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] + result = pd.concat(objs, **kwargs) + assert result.flags.allows_duplicate_labels is False + + @pytest.mark.parametrize( + "left, right, expected", + [ + # false false false + pytest.param( + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]).set_flags( + allows_duplicate_labels=False + ), + False, + marks=not_implemented, + ), + # false true false + pytest.param( + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), + False, + marks=not_implemented, + ), + # true true true + ( + pd.DataFrame({"A": [0, 1]}, index=["a", "b"]), + pd.DataFrame({"B": [0, 1]}, index=["a", "d"]), + True, + ), + ], + ) + def test_merge(self, left, right, expected): + result = pd.merge(left, right, left_index=True, right_index=True) + assert result.flags.allows_duplicate_labels is expected + + @not_implemented + def test_groupby(self): + # XXX: This is under tested + # TODO: + # - apply + # - transform + # - Should passing a grouper that disallows duplicates propagate? 
+ df = pd.DataFrame({"A": [1, 2, 3]}).set_flags(allows_duplicate_labels=False) + result = df.groupby([0, 0, 1]).agg("count") + assert result.flags.allows_duplicate_labels is False + + @pytest.mark.parametrize("frame", [True, False]) + @not_implemented + def test_window(self, frame): + df = pd.Series( + 1, + index=pd.date_range("2000", periods=12), + name="A", + allows_duplicate_labels=False, + ) + if frame: + df = df.to_frame() + assert df.rolling(3).mean().flags.allows_duplicate_labels is False + assert df.ewm(3).mean().flags.allows_duplicate_labels is False + assert df.expanding(3).mean().flags.allows_duplicate_labels is False + + +# ---------------------------------------------------------------------------- +# Raises + + +class TestRaises: + @pytest.mark.parametrize( + "cls, axes", + [ + (pd.Series, {"index": ["a", "a"], "dtype": float}), + (pd.DataFrame, {"index": ["a", "a"]}), + (pd.DataFrame, {"index": ["a", "a"], "columns": ["b", "b"]}), + (pd.DataFrame, {"columns": ["b", "b"]}), + ], + ) + def test_set_flags_with_duplicates(self, cls, axes): + result = cls(**axes) + assert result.flags.allows_duplicate_labels is True + + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + cls(**axes).set_flags(allows_duplicate_labels=False) + + @pytest.mark.parametrize( + "data", + [ + pd.Series(index=[0, 0], dtype=float), + pd.DataFrame(index=[0, 0]), + pd.DataFrame(columns=[0, 0]), + ], + ) + def test_setting_allows_duplicate_labels_raises(self, data): + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + data.flags.allows_duplicate_labels = False + + assert data.flags.allows_duplicate_labels is True + + def test_series_raises(self): + a = pd.Series(0, index=["a", "b"]) + b = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False) + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.concat([a, b]) + + @pytest.mark.parametrize( + "getter, target", + [ + (operator.itemgetter(["A", "A"]), None), + # loc + (operator.itemgetter(["a", "a"]), "loc"), + pytest.param(operator.itemgetter(("a", ["A", "A"])), "loc"), + (operator.itemgetter((["a", "a"], "A")), "loc"), + # iloc + (operator.itemgetter([0, 0]), "iloc"), + pytest.param(operator.itemgetter((0, [0, 0])), "iloc"), + pytest.param(operator.itemgetter(([0, 0], 0)), "iloc"), + ], + ) + def test_getitem_raises(self, getter, target): + df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags( + allows_duplicate_labels=False + ) + if target: + # df, df.loc, or df.iloc + target = getattr(df, target) + else: + target = df + + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + getter(target) + + @pytest.mark.parametrize( + "objs, kwargs", + [ + ( + [ + pd.Series(1, index=[0, 1], name="a"), + pd.Series(2, index=[0, 1], name="a"), + ], + {"axis": 1}, + ) + ], + ) + def test_concat_raises(self, objs, kwargs): + objs = [x.set_flags(allows_duplicate_labels=False) for x in objs] + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.concat(objs, **kwargs) + + @not_implemented + def test_merge_raises(self): + a = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "b", "c"]).set_flags( + allows_duplicate_labels=False + ) + b = pd.DataFrame({"B": [0, 1, 2]}, index=["a", "b", "b"]) + msg = "Index has duplicates." 
+ with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.merge(a, b, left_index=True, right_index=True) + + +@pytest.mark.parametrize( + "idx", + [ + pd.Index([1, 1]), + pd.Index(["a", "a"]), + pd.Index([1.1, 1.1]), + pd.PeriodIndex([pd.Period("2000", "D")] * 2), + pd.DatetimeIndex([pd.Timestamp("2000")] * 2), + pd.TimedeltaIndex([pd.Timedelta("1D")] * 2), + pd.CategoricalIndex(["a", "a"]), + pd.IntervalIndex([pd.Interval(0, 1)] * 2), + pd.MultiIndex.from_tuples([("a", 1), ("a", 1)]), + ], + ids=lambda x: type(x).__name__, +) +def test_raises_basic(idx): + msg = "Index has duplicates." + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False) + + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.DataFrame({"A": [1, 1]}, index=idx).set_flags(allows_duplicate_labels=False) + + with pytest.raises(pd.errors.DuplicateLabelError, match=msg): + pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False) + + +def test_format_duplicate_labels_message(): + idx = pd.Index(["a", "b", "a", "b", "c"]) + result = idx._format_duplicate_message() + expected = pd.DataFrame( + {"positions": [[0, 2], [1, 3]]}, index=pd.Index(["a", "b"], name="label") + ) + tm.assert_frame_equal(result, expected) + + +def test_format_duplicate_labels_message_multi(): + idx = pd.MultiIndex.from_product([["A"], ["a", "b", "a", "b", "c"]]) + result = idx._format_duplicate_message() + expected = pd.DataFrame( + {"positions": [[0, 2], [1, 3]]}, + index=pd.MultiIndex.from_product([["A"], ["a", "b"]]), + ) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_insert_raises(): + df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False) + msg = "Cannot specify" + with pytest.raises(ValueError, match=msg): + df.insert(0, "A", [3, 4], allow_duplicates=True) + + +@pytest.mark.parametrize( + "method, frame_only", + [ + (operator.methodcaller("set_index", "A", inplace=True), True), + (operator.methodcaller("reset_index", inplace=True), True), + (operator.methodcaller("rename", lambda x: x, inplace=True), False), + ], +) +def test_inplace_raises(method, frame_only): + df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags( + allows_duplicate_labels=False + ) + s = df["A"] + s.flags.allows_duplicate_labels = False + msg = "Cannot specify" + + with pytest.raises(ValueError, match=msg): + method(df) + if not frame_only: + with pytest.raises(ValueError, match=msg): + method(s) + + +def test_pickle(): + a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False) + b = tm.round_trip_pickle(a) + tm.assert_series_equal(a, b) + + a = pd.DataFrame({"A": []}).set_flags(allows_duplicate_labels=False) + b = tm.round_trip_pickle(a) + tm.assert_frame_equal(a, b) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_finalize.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_finalize.py new file mode 100644 index 0000000000000000000000000000000000000000..866e9e203ffe3ac1fe29d86b87bbacccf1268e12 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_finalize.py @@ -0,0 +1,767 @@ +""" +An exhaustive list of pandas methods exercising NDFrame.__finalize__. +""" +import operator +import re + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + +# TODO: +# * Binary methods (mul, div, etc.) +# * Binary outputs (align, etc.) +# * top-level methods (concat, merge, get_dummies, etc.) 
+# * window +# * cumulative reductions + +not_implemented_mark = pytest.mark.xfail(reason="not implemented") + +mi = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["A", "B"]) + +frame_data = ({"A": [1]},) +frame_mi_data = ({"A": [1, 2, 3, 4]}, mi) + + +# Tuple of +# - Callable: Constructor (Series, DataFrame) +# - Tuple: Constructor args +# - Callable: pass the constructed value with attrs set to this. + +_all_methods = [ + (pd.Series, ([0],), operator.methodcaller("take", [])), + (pd.Series, ([0],), operator.methodcaller("__getitem__", [True])), + (pd.Series, ([0],), operator.methodcaller("repeat", 2)), + (pd.Series, ([0],), operator.methodcaller("reset_index")), + (pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)), + (pd.Series, ([0],), operator.methodcaller("to_frame")), + (pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")), + (pd.Series, ([0, 0],), operator.methodcaller("duplicated")), + (pd.Series, ([0, 0],), operator.methodcaller("round")), + (pd.Series, ([0, 0],), operator.methodcaller("rename", lambda x: x + 1)), + (pd.Series, ([0, 0],), operator.methodcaller("rename", "name")), + (pd.Series, ([0, 0],), operator.methodcaller("set_axis", ["a", "b"])), + (pd.Series, ([0, 0],), operator.methodcaller("reindex", [1, 0])), + (pd.Series, ([0, 0],), operator.methodcaller("drop", [0])), + (pd.Series, (pd.array([0, pd.NA]),), operator.methodcaller("fillna", 0)), + (pd.Series, ([0, 0],), operator.methodcaller("replace", {0: 1})), + (pd.Series, ([0, 0],), operator.methodcaller("shift")), + (pd.Series, ([0, 0],), operator.methodcaller("isin", [0, 1])), + (pd.Series, ([0, 0],), operator.methodcaller("between", 0, 2)), + (pd.Series, ([0, 0],), operator.methodcaller("isna")), + (pd.Series, ([0, 0],), operator.methodcaller("isnull")), + (pd.Series, ([0, 0],), operator.methodcaller("notna")), + (pd.Series, ([0, 0],), operator.methodcaller("notnull")), + (pd.Series, ([1],), operator.methodcaller("add", pd.Series([1]))), + # TODO: mul, div, etc. 
+ ( + pd.Series, + ([0], pd.period_range("2000", periods=1)), + operator.methodcaller("to_timestamp"), + ), + ( + pd.Series, + ([0], pd.date_range("2000", periods=1)), + operator.methodcaller("to_period"), + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("dot", pd.DataFrame(index=["A"])), + ), + marks=pytest.mark.xfail(reason="Implement binary finalize"), + ), + (pd.DataFrame, frame_data, operator.methodcaller("transpose")), + (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", ["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", np.array([True]))), + (pd.DataFrame, ({("A", "a"): [1]},), operator.methodcaller("__getitem__", ["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("query", "A == 1")), + (pd.DataFrame, frame_data, operator.methodcaller("eval", "A + 1", engine="python")), + (pd.DataFrame, frame_data, operator.methodcaller("select_dtypes", include="int")), + (pd.DataFrame, frame_data, operator.methodcaller("assign", b=1)), + (pd.DataFrame, frame_data, operator.methodcaller("set_axis", ["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("reindex", [0, 1])), + (pd.DataFrame, frame_data, operator.methodcaller("drop", columns=["A"])), + (pd.DataFrame, frame_data, operator.methodcaller("drop", index=[0])), + (pd.DataFrame, frame_data, operator.methodcaller("rename", columns={"A": "a"})), + (pd.DataFrame, frame_data, operator.methodcaller("rename", index=lambda x: x)), + (pd.DataFrame, frame_data, operator.methodcaller("fillna", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("fillna", method="ffill")), + (pd.DataFrame, frame_data, operator.methodcaller("set_index", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("reset_index")), + (pd.DataFrame, frame_data, operator.methodcaller("isna")), + (pd.DataFrame, frame_data, operator.methodcaller("isnull")), + (pd.DataFrame, frame_data, operator.methodcaller("notna")), + (pd.DataFrame, frame_data, operator.methodcaller("notnull")), + (pd.DataFrame, frame_data, operator.methodcaller("dropna")), + (pd.DataFrame, frame_data, operator.methodcaller("drop_duplicates")), + (pd.DataFrame, frame_data, operator.methodcaller("duplicated")), + (pd.DataFrame, frame_data, operator.methodcaller("sort_values", by="A")), + (pd.DataFrame, frame_data, operator.methodcaller("sort_index")), + (pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")), + (pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")), + (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel")), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("add", pd.DataFrame(*frame_data)), + ), + # TODO: div, mul, etc. 
+ ( + pd.DataFrame, + frame_data, + operator.methodcaller("combine", pd.DataFrame(*frame_data), operator.add), + ), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("combine_first", pd.DataFrame(*frame_data)), + ), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("update", pd.DataFrame(*frame_data)), + ), + marks=not_implemented_mark, + ), + (pd.DataFrame, frame_data, operator.methodcaller("pivot", columns="A")), + ( + pd.DataFrame, + ({"A": [1], "B": [1]},), + operator.methodcaller("pivot_table", columns="A"), + ), + ( + pd.DataFrame, + ({"A": [1], "B": [1]},), + operator.methodcaller("pivot_table", columns="A", aggfunc=["mean", "sum"]), + ), + (pd.DataFrame, frame_data, operator.methodcaller("stack")), + (pd.DataFrame, frame_data, operator.methodcaller("explode", "A")), + (pd.DataFrame, frame_mi_data, operator.methodcaller("unstack")), + ( + pd.DataFrame, + ({"A": ["a", "b", "c"], "B": [1, 3, 5], "C": [2, 4, 6]},), + operator.methodcaller("melt", id_vars=["A"], value_vars=["B"]), + ), + (pd.DataFrame, frame_data, operator.methodcaller("map", lambda x: x)), + pytest.param( + ( + pd.DataFrame, + frame_data, + operator.methodcaller("merge", pd.DataFrame({"A": [1]})), + ), + marks=not_implemented_mark, + ), + (pd.DataFrame, frame_data, operator.methodcaller("round", 2)), + (pd.DataFrame, frame_data, operator.methodcaller("corr")), + pytest.param( + (pd.DataFrame, frame_data, operator.methodcaller("cov")), + marks=[ + pytest.mark.filterwarnings("ignore::RuntimeWarning"), + ], + ), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("corrwith", pd.DataFrame(*frame_data)), + ), + (pd.DataFrame, frame_data, operator.methodcaller("count")), + (pd.DataFrame, frame_data, operator.methodcaller("nunique")), + (pd.DataFrame, frame_data, operator.methodcaller("idxmin")), + (pd.DataFrame, frame_data, operator.methodcaller("idxmax")), + (pd.DataFrame, frame_data, operator.methodcaller("mode")), + (pd.Series, [0], operator.methodcaller("mode")), + (pd.DataFrame, frame_data, operator.methodcaller("median")), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("quantile", numeric_only=True), + ), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("quantile", q=[0.25, 0.75], numeric_only=True), + ), + ( + pd.DataFrame, + ({"A": [pd.Timedelta(days=1), pd.Timedelta(days=2)]},), + operator.methodcaller("quantile", numeric_only=False), + ), + ( + pd.DataFrame, + ({"A": [np.datetime64("2022-01-01"), np.datetime64("2022-01-02")]},), + operator.methodcaller("quantile", numeric_only=True), + ), + ( + pd.DataFrame, + ({"A": [1]}, [pd.Period("2000", "D")]), + operator.methodcaller("to_timestamp"), + ), + ( + pd.DataFrame, + ({"A": [1]}, [pd.Timestamp("2000")]), + operator.methodcaller("to_period", freq="D"), + ), + (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", [1])), + (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", pd.Series([1]))), + ( + pd.DataFrame, + frame_mi_data, + operator.methodcaller("isin", pd.DataFrame({"A": [1]})), + ), + (pd.DataFrame, frame_mi_data, operator.methodcaller("droplevel", "A")), + (pd.DataFrame, frame_data, operator.methodcaller("pop", "A")), + # Squeeze on columns, otherwise we'll end up with a scalar + (pd.DataFrame, frame_data, operator.methodcaller("squeeze", axis="columns")), + (pd.Series, ([1, 2],), operator.methodcaller("squeeze")), + (pd.Series, ([1, 2],), operator.methodcaller("rename_axis", index="a")), + (pd.DataFrame, frame_data, operator.methodcaller("rename_axis", columns="a")), + # Unary ops + 
(pd.DataFrame, frame_data, operator.neg), + (pd.Series, [1], operator.neg), + (pd.DataFrame, frame_data, operator.pos), + (pd.Series, [1], operator.pos), + (pd.DataFrame, frame_data, operator.inv), + (pd.Series, [1], operator.inv), + (pd.DataFrame, frame_data, abs), + (pd.Series, [1], abs), + (pd.DataFrame, frame_data, round), + (pd.Series, [1], round), + (pd.DataFrame, frame_data, operator.methodcaller("take", [0, 0])), + (pd.DataFrame, frame_mi_data, operator.methodcaller("xs", "a")), + (pd.Series, (1, mi), operator.methodcaller("xs", "a")), + (pd.DataFrame, frame_data, operator.methodcaller("get", "A")), + ( + pd.DataFrame, + frame_data, + operator.methodcaller("reindex_like", pd.DataFrame({"A": [1, 2, 3]})), + ), + ( + pd.Series, + frame_data, + operator.methodcaller("reindex_like", pd.Series([0, 1, 2])), + ), + (pd.DataFrame, frame_data, operator.methodcaller("add_prefix", "_")), + (pd.DataFrame, frame_data, operator.methodcaller("add_suffix", "_")), + (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_prefix", "_")), + (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_suffix", "_")), + (pd.Series, ([3, 2],), operator.methodcaller("sort_values")), + (pd.Series, ([1] * 10,), operator.methodcaller("head")), + (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("head")), + (pd.Series, ([1] * 10,), operator.methodcaller("tail")), + (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("tail")), + (pd.Series, ([1, 2],), operator.methodcaller("sample", n=2, replace=True)), + (pd.DataFrame, (frame_data,), operator.methodcaller("sample", n=2, replace=True)), + (pd.Series, ([1, 2],), operator.methodcaller("astype", float)), + (pd.DataFrame, frame_data, operator.methodcaller("astype", float)), + (pd.Series, ([1, 2],), operator.methodcaller("copy")), + (pd.DataFrame, frame_data, operator.methodcaller("copy")), + (pd.Series, ([1, 2], None, object), operator.methodcaller("infer_objects")), + ( + pd.DataFrame, + ({"A": np.array([1, 2], dtype=object)},), + operator.methodcaller("infer_objects"), + ), + (pd.Series, ([1, 2],), operator.methodcaller("convert_dtypes")), + (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")), + (pd.Series, ([1, None, 3],), operator.methodcaller("interpolate")), + (pd.DataFrame, ({"A": [1, None, 3]},), operator.methodcaller("interpolate")), + (pd.Series, ([1, 2],), operator.methodcaller("clip", lower=1)), + (pd.DataFrame, frame_data, operator.methodcaller("clip", lower=1)), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("asfreq", "h"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("asfreq", "h"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("at_time", "12:00"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("at_time", "12:00"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("between_time", "12:00", "13:00"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("between_time", "12:00", "13:00"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("last", "3D"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("last", "3D"), + ), + (pd.Series, ([1, 2],), operator.methodcaller("rank")), + (pd.DataFrame, frame_data, operator.methodcaller("rank")), + (pd.Series, 
([1, 2],), operator.methodcaller("where", np.array([True, False]))), + (pd.DataFrame, frame_data, operator.methodcaller("where", np.array([[True]]))), + (pd.Series, ([1, 2],), operator.methodcaller("mask", np.array([True, False]))), + (pd.DataFrame, frame_data, operator.methodcaller("mask", np.array([[True]]))), + (pd.Series, ([1, 2],), operator.methodcaller("truncate", before=0)), + (pd.DataFrame, frame_data, operator.methodcaller("truncate", before=0)), + ( + pd.Series, + (1, pd.date_range("2000", periods=4, tz="UTC")), + operator.methodcaller("tz_convert", "CET"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4, tz="UTC")), + operator.methodcaller("tz_convert", "CET"), + ), + ( + pd.Series, + (1, pd.date_range("2000", periods=4)), + operator.methodcaller("tz_localize", "CET"), + ), + ( + pd.DataFrame, + ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + operator.methodcaller("tz_localize", "CET"), + ), + (pd.Series, ([1, 2],), operator.methodcaller("describe")), + (pd.DataFrame, frame_data, operator.methodcaller("describe")), + (pd.Series, ([1, 2],), operator.methodcaller("pct_change")), + (pd.DataFrame, frame_data, operator.methodcaller("pct_change")), + (pd.Series, ([1],), operator.methodcaller("transform", lambda x: x - x.min())), + ( + pd.DataFrame, + frame_mi_data, + operator.methodcaller("transform", lambda x: x - x.min()), + ), + (pd.Series, ([1],), operator.methodcaller("apply", lambda x: x)), + (pd.DataFrame, frame_mi_data, operator.methodcaller("apply", lambda x: x)), + # Cumulative reductions + (pd.Series, ([1],), operator.methodcaller("cumsum")), + (pd.DataFrame, frame_data, operator.methodcaller("cumsum")), + (pd.Series, ([1],), operator.methodcaller("cummin")), + (pd.DataFrame, frame_data, operator.methodcaller("cummin")), + (pd.Series, ([1],), operator.methodcaller("cummax")), + (pd.DataFrame, frame_data, operator.methodcaller("cummax")), + (pd.Series, ([1],), operator.methodcaller("cumprod")), + (pd.DataFrame, frame_data, operator.methodcaller("cumprod")), + # Reductions + (pd.DataFrame, frame_data, operator.methodcaller("any")), + (pd.DataFrame, frame_data, operator.methodcaller("all")), + (pd.DataFrame, frame_data, operator.methodcaller("min")), + (pd.DataFrame, frame_data, operator.methodcaller("max")), + (pd.DataFrame, frame_data, operator.methodcaller("sum")), + (pd.DataFrame, frame_data, operator.methodcaller("std")), + (pd.DataFrame, frame_data, operator.methodcaller("mean")), + (pd.DataFrame, frame_data, operator.methodcaller("prod")), + (pd.DataFrame, frame_data, operator.methodcaller("sem")), + (pd.DataFrame, frame_data, operator.methodcaller("skew")), + (pd.DataFrame, frame_data, operator.methodcaller("kurt")), +] + + +def idfn(x): + xpr = re.compile(r"'(.*)?'") + m = xpr.search(str(x)) + if m: + return m.group(1) + else: + return str(x) + + +@pytest.fixture(params=_all_methods, ids=lambda x: idfn(x[-1])) +def ndframe_method(request): + """ + An NDFrame method returning an NDFrame. 
+ """ + return request.param + + +@pytest.mark.filterwarnings( + "ignore:DataFrame.fillna with 'method' is deprecated:FutureWarning", + "ignore:last is deprecated:FutureWarning", +) +def test_finalize_called(ndframe_method): + cls, init_args, method = ndframe_method + ndframe = cls(*init_args) + + ndframe.attrs = {"a": 1} + result = method(ndframe) + + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "data", + [ + pd.Series(1, pd.date_range("2000", periods=4)), + pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + ], +) +def test_finalize_first(data): + deprecated_msg = "first is deprecated" + + data.attrs = {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): + result = data.first("3D") + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "data", + [ + pd.Series(1, pd.date_range("2000", periods=4)), + pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)), + ], +) +def test_finalize_last(data): + # GH 53710 + deprecated_msg = "last is deprecated" + + data.attrs = {"a": 1} + with tm.assert_produces_warning(FutureWarning, match=deprecated_msg): + result = data.last("3D") + assert result.attrs == {"a": 1} + + +@not_implemented_mark +def test_finalize_called_eval_numexpr(): + pytest.importorskip("numexpr") + df = pd.DataFrame({"A": [1, 2]}) + df.attrs["A"] = 1 + result = df.eval("A + 1", engine="numexpr") + assert result.attrs == {"A": 1} + + +# ---------------------------------------------------------------------------- +# Binary operations + + +@pytest.mark.parametrize("annotate", ["left", "right", "both"]) +@pytest.mark.parametrize( + "args", + [ + (1, pd.Series([1])), + (1, pd.DataFrame({"A": [1]})), + (pd.Series([1]), 1), + (pd.DataFrame({"A": [1]}), 1), + (pd.Series([1]), pd.Series([1])), + (pd.DataFrame({"A": [1]}), pd.DataFrame({"A": [1]})), + (pd.Series([1]), pd.DataFrame({"A": [1]})), + (pd.DataFrame({"A": [1]}), pd.Series([1])), + ], + ids=lambda x: f"({type(x[0]).__name__},{type(x[1]).__name__})", +) +def test_binops(request, args, annotate, all_binary_operators): + # This generates 624 tests... Is that needed? 
+ left, right = args + if isinstance(left, (pd.DataFrame, pd.Series)): + left.attrs = {} + if isinstance(right, (pd.DataFrame, pd.Series)): + right.attrs = {} + + if annotate == "left" and isinstance(left, int): + pytest.skip("left is an int and doesn't support .attrs") + if annotate == "right" and isinstance(right, int): + pytest.skip("right is an int and doesn't support .attrs") + + if not (isinstance(left, int) or isinstance(right, int)) and annotate != "both": + if not all_binary_operators.__name__.startswith("r"): + if annotate == "right" and isinstance(left, type(right)): + request.applymarker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when right has " + f"attrs and both are {type(left)}" + ) + ) + if not isinstance(left, type(right)): + if annotate == "left" and isinstance(left, pd.Series): + request.applymarker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when the " + "objects are different Series has attrs" + ) + ) + elif annotate == "right" and isinstance(right, pd.Series): + request.applymarker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when the " + "objects are different Series has attrs" + ) + ) + else: + if annotate == "left" and isinstance(left, type(right)): + request.applymarker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when left has " + f"attrs and both are {type(left)}" + ) + ) + if not isinstance(left, type(right)): + if annotate == "right" and isinstance(right, pd.Series): + request.applymarker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when the " + "objects are different Series has attrs" + ) + ) + elif annotate == "left" and isinstance(left, pd.Series): + request.applymarker( + pytest.mark.xfail( + reason=f"{all_binary_operators} doesn't work when the " + "objects are different Series has attrs" + ) + ) + if annotate in {"left", "both"} and not isinstance(left, int): + left.attrs = {"a": 1} + if annotate in {"right", "both"} and not isinstance(right, int): + right.attrs = {"a": 1} + + is_cmp = all_binary_operators in [ + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.lt, + operator.le, + ] + if is_cmp and isinstance(left, pd.DataFrame) and isinstance(right, pd.Series): + # in 2.0 silent alignment on comparisons was removed xref GH#28759 + left, right = left.align(right, axis=1, copy=False) + elif is_cmp and isinstance(left, pd.Series) and isinstance(right, pd.DataFrame): + right, left = right.align(left, axis=1, copy=False) + + result = all_binary_operators(left, right) + assert result.attrs == {"a": 1} + + +# ---------------------------------------------------------------------------- +# Accessors + + +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("capitalize"), + operator.methodcaller("casefold"), + operator.methodcaller("cat", ["a"]), + operator.methodcaller("contains", "a"), + operator.methodcaller("count", "a"), + operator.methodcaller("encode", "utf-8"), + operator.methodcaller("endswith", "a"), + operator.methodcaller("extract", r"(\w)(\d)"), + operator.methodcaller("extract", r"(\w)(\d)", expand=False), + operator.methodcaller("find", "a"), + operator.methodcaller("findall", "a"), + operator.methodcaller("get", 0), + operator.methodcaller("index", "a"), + operator.methodcaller("len"), + operator.methodcaller("ljust", 4), + operator.methodcaller("lower"), + operator.methodcaller("lstrip"), + operator.methodcaller("match", r"\w"), + operator.methodcaller("normalize", "NFC"), + 
operator.methodcaller("pad", 4), + operator.methodcaller("partition", "a"), + operator.methodcaller("repeat", 2), + operator.methodcaller("replace", "a", "b"), + operator.methodcaller("rfind", "a"), + operator.methodcaller("rindex", "a"), + operator.methodcaller("rjust", 4), + operator.methodcaller("rpartition", "a"), + operator.methodcaller("rstrip"), + operator.methodcaller("slice", 4), + operator.methodcaller("slice_replace", 1, repl="a"), + operator.methodcaller("startswith", "a"), + operator.methodcaller("strip"), + operator.methodcaller("swapcase"), + operator.methodcaller("translate", {"a": "b"}), + operator.methodcaller("upper"), + operator.methodcaller("wrap", 4), + operator.methodcaller("zfill", 4), + operator.methodcaller("isalnum"), + operator.methodcaller("isalpha"), + operator.methodcaller("isdigit"), + operator.methodcaller("isspace"), + operator.methodcaller("islower"), + operator.methodcaller("isupper"), + operator.methodcaller("istitle"), + operator.methodcaller("isnumeric"), + operator.methodcaller("isdecimal"), + operator.methodcaller("get_dummies"), + ], + ids=idfn, +) +def test_string_method(method): + s = pd.Series(["a1"]) + s.attrs = {"a": 1} + result = method(s.str) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("to_period"), + operator.methodcaller("tz_localize", "CET"), + operator.methodcaller("normalize"), + operator.methodcaller("strftime", "%Y"), + operator.methodcaller("round", "h"), + operator.methodcaller("floor", "h"), + operator.methodcaller("ceil", "h"), + operator.methodcaller("month_name"), + operator.methodcaller("day_name"), + ], + ids=idfn, +) +def test_datetime_method(method): + s = pd.Series(pd.date_range("2000", periods=4)) + s.attrs = {"a": 1} + result = method(s.dt) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "attr", + [ + "date", + "time", + "timetz", + "year", + "month", + "day", + "hour", + "minute", + "second", + "microsecond", + "nanosecond", + "dayofweek", + "day_of_week", + "dayofyear", + "day_of_year", + "quarter", + "is_month_start", + "is_month_end", + "is_quarter_start", + "is_quarter_end", + "is_year_start", + "is_year_end", + "is_leap_year", + "daysinmonth", + "days_in_month", + ], +) +def test_datetime_property(attr): + s = pd.Series(pd.date_range("2000", periods=4)) + s.attrs = {"a": 1} + result = getattr(s.dt, attr) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "attr", ["days", "seconds", "microseconds", "nanoseconds", "components"] +) +def test_timedelta_property(attr): + s = pd.Series(pd.timedelta_range("2000", periods=4)) + s.attrs = {"a": 1} + result = getattr(s.dt, attr) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize("method", [operator.methodcaller("total_seconds")]) +def test_timedelta_methods(method): + s = pd.Series(pd.timedelta_range("2000", periods=4)) + s.attrs = {"a": 1} + result = method(s.dt) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("add_categories", ["c"]), + operator.methodcaller("as_ordered"), + operator.methodcaller("as_unordered"), + lambda x: getattr(x, "codes"), + operator.methodcaller("remove_categories", "a"), + operator.methodcaller("remove_unused_categories"), + operator.methodcaller("rename_categories", {"a": "A", "b": "B"}), + operator.methodcaller("reorder_categories", ["b", "a"]), + operator.methodcaller("set_categories", ["A", "B"]), + ], +) +@not_implemented_mark +def test_categorical_accessor(method): + s = 
pd.Series(["a", "b"], dtype="category") + s.attrs = {"a": 1} + result = method(s.cat) + assert result.attrs == {"a": 1} + + +# ---------------------------------------------------------------------------- +# Groupby + + +@pytest.mark.parametrize( + "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})] +) +@pytest.mark.parametrize( + "method", + [ + operator.methodcaller("sum"), + lambda x: x.apply(lambda y: y), + lambda x: x.agg("sum"), + lambda x: x.agg("mean"), + lambda x: x.agg("median"), + ], +) +def test_groupby_finalize(obj, method): + obj.attrs = {"a": 1} + result = method(obj.groupby([0, 0], group_keys=False)) + assert result.attrs == {"a": 1} + + +@pytest.mark.parametrize( + "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})] +) +@pytest.mark.parametrize( + "method", + [ + lambda x: x.agg(["sum", "count"]), + lambda x: x.agg("std"), + lambda x: x.agg("var"), + lambda x: x.agg("sem"), + lambda x: x.agg("size"), + lambda x: x.agg("ohlc"), + ], +) +@not_implemented_mark +def test_groupby_finalize_not_implemented(obj, method): + obj.attrs = {"a": 1} + result = method(obj.groupby([0, 0])) + assert result.attrs == {"a": 1} + + +def test_finalize_frame_series_name(): + # https://github.com/pandas-dev/pandas/pull/37186/files#r506978889 + # ensure we don't copy the column `name` to the Series. + df = pd.DataFrame({"name": [1, 2]}) + result = pd.Series([1, 2]).__finalize__(df) + assert result.name is None diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..fc7aa9e7b2c46362aa9b6a9ebfc4f663cfd61058 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py @@ -0,0 +1,209 @@ +from copy import deepcopy +from operator import methodcaller + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + + +class TestDataFrame: + @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) + def test_set_axis_name(self, func): + df = DataFrame([[1, 2], [3, 4]]) + + result = methodcaller(func, "foo")(df) + assert df.index.name is None + assert result.index.name == "foo" + + result = methodcaller(func, "cols", axis=1)(df) + assert df.columns.name is None + assert result.columns.name == "cols" + + @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"]) + def test_set_axis_name_mi(self, func): + df = DataFrame( + np.empty((3, 3)), + index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]), + columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]), + ) + + level_names = ["L1", "L2"] + + result = methodcaller(func, level_names)(df) + assert result.index.names == level_names + assert result.columns.names == [None, None] + + result = methodcaller(func, level_names, axis=1)(df) + assert result.columns.names == ["L1", "L2"] + assert result.index.names == [None, None] + + def test_nonzero_single_element(self): + # allow single item via bool method + msg_warn = ( + "DataFrame.bool is now deprecated and will be removed " + "in future version of pandas" + ) + df = DataFrame([[True]]) + df1 = DataFrame([[False]]) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert df.bool() + + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert not df1.bool() + + df = DataFrame([[False, False]]) + msg_err 
= "The truth value of a DataFrame is ambiguous" + with pytest.raises(ValueError, match=msg_err): + bool(df) + + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + with pytest.raises(ValueError, match=msg_err): + df.bool() + + def test_metadata_propagation_indiv_groupby(self): + # groupby + df = DataFrame( + { + "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"], + "B": ["one", "one", "two", "three", "two", "two", "one", "three"], + "C": np.random.default_rng(2).standard_normal(8), + "D": np.random.default_rng(2).standard_normal(8), + } + ) + result = df.groupby("A").sum() + tm.assert_metadata_equivalent(df, result) + + def test_metadata_propagation_indiv_resample(self): + # resample + df = DataFrame( + np.random.default_rng(2).standard_normal((1000, 2)), + index=date_range("20130101", periods=1000, freq="s"), + ) + result = df.resample("1min") + tm.assert_metadata_equivalent(df, result) + + def test_metadata_propagation_indiv(self, monkeypatch): + # merging with override + # GH 6923 + + def finalize(self, other, method=None, **kwargs): + for name in self._metadata: + if method == "merge": + left, right = other.left, other.right + value = getattr(left, name, "") + "|" + getattr(right, name, "") + object.__setattr__(self, name, value) + elif method == "concat": + value = "+".join( + [getattr(o, name) for o in other.objs if getattr(o, name, None)] + ) + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, "")) + + return self + + with monkeypatch.context() as m: + m.setattr(DataFrame, "_metadata", ["filename"]) + m.setattr(DataFrame, "__finalize__", finalize) + + df1 = DataFrame( + np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["a", "b"] + ) + df2 = DataFrame( + np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["c", "d"] + ) + DataFrame._metadata = ["filename"] + df1.filename = "fname1.csv" + df2.filename = "fname2.csv" + + result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner") + assert result.filename == "fname1.csv|fname2.csv" + + # concat + # GH#6927 + df1 = DataFrame( + np.random.default_rng(2).integers(0, 4, (3, 2)), columns=list("ab") + ) + df1.filename = "foo" + + result = pd.concat([df1, df1]) + assert result.filename == "foo+foo" + + def test_set_attribute(self): + # Test for consistent setattr behavior when an attribute and a column + # have the same name (Issue #8994) + df = DataFrame({"x": [1, 2, 3]}) + + df.y = 2 + df["y"] = [2, 4, 6] + df.y = 5 + + assert df.y == 5 + tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y")) + + def test_deepcopy_empty(self): + # This test covers empty frame copying with non-empty column sets + # as reported in issue GH15370 + empty_frame = DataFrame(data=[], index=[], columns=["A"]) + empty_frame_copy = deepcopy(empty_frame) + + tm.assert_frame_equal(empty_frame_copy, empty_frame) + + +# formerly in Generic but only test DataFrame +class TestDataFrame2: + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_bool_args(self, value): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + msg = 'For argument "inplace" expected type bool, received type' + with pytest.raises(ValueError, match=msg): + df.copy().rename_axis(mapper={"a": "x", "b": "y"}, axis=1, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().drop("a", axis=1, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().fillna(value=0, inplace=value) + + with pytest.raises(ValueError, match=msg): + 
df.copy().replace(to_replace=1, value=7, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().interpolate(inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy()._where(cond=df.a > 2, inplace=value) + + with pytest.raises(ValueError, match=msg): + df.copy().mask(cond=df.a > 2, inplace=value) + + def test_unexpected_keyword(self): + # GH8597 + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), columns=["jim", "joe"] + ) + ca = pd.Categorical([0, 0, 2, 2, 3, np.nan]) + ts = df["joe"].copy() + ts[2] = np.nan + + msg = "unexpected keyword" + with pytest.raises(TypeError, match=msg): + df.drop("joe", axis=1, in_place=True) + + with pytest.raises(TypeError, match=msg): + df.reindex([1, 0], inplace=True) + + with pytest.raises(TypeError, match=msg): + ca.fillna(0, inplace=True) + + with pytest.raises(TypeError, match=msg): + ts.fillna(0, in_place=True) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py new file mode 100644 index 0000000000000000000000000000000000000000..6564e381af0ea9b821e44f780ce209936f9524dc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py @@ -0,0 +1,504 @@ +from copy import ( + copy, + deepcopy, +) + +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_scalar + +from pandas import ( + DataFrame, + Index, + Series, + date_range, +) +import pandas._testing as tm + +# ---------------------------------------------------------------------- +# Generic types test cases + + +def construct(box, shape, value=None, dtype=None, **kwargs): + """ + construct an object for the given shape + if value is specified use that if its a scalar + if value is an array, repeat it as needed + """ + if isinstance(shape, int): + shape = tuple([shape] * box._AXIS_LEN) + if value is not None: + if is_scalar(value): + if value == "empty": + arr = None + dtype = np.float64 + + # remove the info axis + kwargs.pop(box._info_axis_name, None) + else: + arr = np.empty(shape, dtype=dtype) + arr.fill(value) + else: + fshape = np.prod(shape) + arr = value.ravel() + new_shape = fshape / arr.shape[0] + if fshape % arr.shape[0] != 0: + raise Exception("invalid value passed in construct") + + arr = np.repeat(arr, new_shape).reshape(shape) + else: + arr = np.random.default_rng(2).standard_normal(shape) + return box(arr, dtype=dtype, **kwargs) + + +class TestGeneric: + @pytest.mark.parametrize( + "func", + [ + str.lower, + {x: x.lower() for x in list("ABCD")}, + Series({x: x.lower() for x in list("ABCD")}), + ], + ) + def test_rename(self, frame_or_series, func): + # single axis + idx = list("ABCD") + + for axis in frame_or_series._AXIS_ORDERS: + kwargs = {axis: idx} + obj = construct(frame_or_series, 4, **kwargs) + + # rename a single axis + result = obj.rename(**{axis: func}) + expected = obj.copy() + setattr(expected, axis, list("abcd")) + tm.assert_equal(result, expected) + + def test_get_numeric_data(self, frame_or_series): + n = 4 + kwargs = { + frame_or_series._get_axis_name(i): list(range(n)) + for i in range(frame_or_series._AXIS_LEN) + } + + # get the numeric data + o = construct(frame_or_series, n, **kwargs) + result = o._get_numeric_data() + tm.assert_equal(result, o) + + # non-inclusion + result = o._get_bool_data() + expected = construct(frame_or_series, n, value="empty", **kwargs) + if isinstance(o, DataFrame): + # preserve columns dtype + expected.columns = 
o.columns[:0] + # https://github.com/pandas-dev/pandas/issues/50862 + tm.assert_equal(result.reset_index(drop=True), expected) + + # get the bool data + arr = np.array([True, True, False, True]) + o = construct(frame_or_series, n, value=arr, **kwargs) + result = o._get_numeric_data() + tm.assert_equal(result, o) + + def test_nonzero(self, frame_or_series): + # GH 4633 + # look at the boolean/nonzero behavior for objects + obj = construct(frame_or_series, shape=4) + msg = f"The truth value of a {frame_or_series.__name__} is ambiguous" + with pytest.raises(ValueError, match=msg): + bool(obj == 0) + with pytest.raises(ValueError, match=msg): + bool(obj == 1) + with pytest.raises(ValueError, match=msg): + bool(obj) + + obj = construct(frame_or_series, shape=4, value=1) + with pytest.raises(ValueError, match=msg): + bool(obj == 0) + with pytest.raises(ValueError, match=msg): + bool(obj == 1) + with pytest.raises(ValueError, match=msg): + bool(obj) + + obj = construct(frame_or_series, shape=4, value=np.nan) + with pytest.raises(ValueError, match=msg): + bool(obj == 0) + with pytest.raises(ValueError, match=msg): + bool(obj == 1) + with pytest.raises(ValueError, match=msg): + bool(obj) + + # empty + obj = construct(frame_or_series, shape=0) + with pytest.raises(ValueError, match=msg): + bool(obj) + + # invalid behaviors + + obj1 = construct(frame_or_series, shape=4, value=1) + obj2 = construct(frame_or_series, shape=4, value=1) + + with pytest.raises(ValueError, match=msg): + if obj1: + pass + + with pytest.raises(ValueError, match=msg): + obj1 and obj2 + with pytest.raises(ValueError, match=msg): + obj1 or obj2 + with pytest.raises(ValueError, match=msg): + not obj1 + + def test_frame_or_series_compound_dtypes(self, frame_or_series): + # see gh-5191 + # Compound dtypes should raise NotImplementedError. 
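+        # e.g. a structured dtype like [("A", "datetime64[h]"), ("B", "str")]
+        # should raise in the constructor rather than be coerced silently.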
+ + def f(dtype): + return construct(frame_or_series, shape=3, value=1, dtype=dtype) + + msg = ( + "compound dtypes are not implemented " + f"in the {frame_or_series.__name__} constructor" + ) + + with pytest.raises(NotImplementedError, match=msg): + f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) + + # these work (though results may be unexpected) + f("int64") + f("float64") + f("M8[ns]") + + def test_metadata_propagation(self, frame_or_series): + # check that the metadata matches up on the resulting ops + + o = construct(frame_or_series, shape=3) + o.name = "foo" + o2 = construct(frame_or_series, shape=3) + o2.name = "bar" + + # ---------- + # preserving + # ---------- + + # simple ops with scalars + for op in ["__add__", "__sub__", "__truediv__", "__mul__"]: + result = getattr(o, op)(1) + tm.assert_metadata_equivalent(o, result) + + # ops with like + for op in ["__add__", "__sub__", "__truediv__", "__mul__"]: + result = getattr(o, op)(o) + tm.assert_metadata_equivalent(o, result) + + # simple boolean + for op in ["__eq__", "__le__", "__ge__"]: + v1 = getattr(o, op)(o) + tm.assert_metadata_equivalent(o, v1) + tm.assert_metadata_equivalent(o, v1 & v1) + tm.assert_metadata_equivalent(o, v1 | v1) + + # combine_first + result = o.combine_first(o2) + tm.assert_metadata_equivalent(o, result) + + # --------------------------- + # non-preserving (by default) + # --------------------------- + + # add non-like + result = o + o2 + tm.assert_metadata_equivalent(result) + + # simple boolean + for op in ["__eq__", "__le__", "__ge__"]: + # this is a name matching op + v1 = getattr(o, op)(o) + v2 = getattr(o, op)(o2) + tm.assert_metadata_equivalent(v2) + tm.assert_metadata_equivalent(v1 & v2) + tm.assert_metadata_equivalent(v1 | v2) + + def test_size_compat(self, frame_or_series): + # GH8846 + # size property should be defined + + o = construct(frame_or_series, shape=10) + assert o.size == np.prod(o.shape) + assert o.size == 10 ** len(o.axes) + + def test_split_compat(self, frame_or_series): + # xref GH8846 + o = construct(frame_or_series, shape=10) + with tm.assert_produces_warning( + FutureWarning, match=".swapaxes' is deprecated", check_stacklevel=False + ): + assert len(np.array_split(o, 5)) == 5 + assert len(np.array_split(o, 2)) == 2 + + # See gh-12301 + def test_stat_unexpected_keyword(self, frame_or_series): + obj = construct(frame_or_series, 5) + starwars = "Star Wars" + errmsg = "unexpected keyword" + + with pytest.raises(TypeError, match=errmsg): + obj.max(epic=starwars) # stat_function + with pytest.raises(TypeError, match=errmsg): + obj.var(epic=starwars) # stat_function_ddof + with pytest.raises(TypeError, match=errmsg): + obj.sum(epic=starwars) # cum_function + with pytest.raises(TypeError, match=errmsg): + obj.any(epic=starwars) # logical_function + + @pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"]) + def test_api_compat(self, func, frame_or_series): + # GH 12021 + # compat for __name__, __qualname__ + + obj = construct(frame_or_series, 5) + f = getattr(obj, func) + assert f.__name__ == func + assert f.__qualname__.endswith(func) + + def test_stat_non_defaults_args(self, frame_or_series): + obj = construct(frame_or_series, 5) + out = np.array([0]) + errmsg = "the 'out' parameter is not supported" + + with pytest.raises(ValueError, match=errmsg): + obj.max(out=out) # stat_function + with pytest.raises(ValueError, match=errmsg): + obj.var(out=out) # stat_function_ddof + with pytest.raises(ValueError, match=errmsg): + obj.sum(out=out) # cum_function + with 
pytest.raises(ValueError, match=errmsg): + obj.any(out=out) # logical_function + + def test_truncate_out_of_bounds(self, frame_or_series): + # GH11382 + + # small + shape = [2000] + ([1] * (frame_or_series._AXIS_LEN - 1)) + small = construct(frame_or_series, shape, dtype="int8", value=1) + tm.assert_equal(small.truncate(), small) + tm.assert_equal(small.truncate(before=0, after=3e3), small) + tm.assert_equal(small.truncate(before=-1, after=2e3), small) + + # big + shape = [2_000_000] + ([1] * (frame_or_series._AXIS_LEN - 1)) + big = construct(frame_or_series, shape, dtype="int8", value=1) + tm.assert_equal(big.truncate(), big) + tm.assert_equal(big.truncate(before=0, after=3e6), big) + tm.assert_equal(big.truncate(before=-1, after=2e6), big) + + @pytest.mark.parametrize( + "func", + [copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)], + ) + @pytest.mark.parametrize("shape", [0, 1, 2]) + def test_copy_and_deepcopy(self, frame_or_series, shape, func): + # GH 15444 + obj = construct(frame_or_series, shape) + obj_copy = func(obj) + assert obj_copy is not obj + tm.assert_equal(obj_copy, obj) + + def test_data_deprecated(self, frame_or_series): + obj = frame_or_series() + msg = "(Series|DataFrame)._data is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + mgr = obj._data + assert mgr is obj._mgr + + +class TestNDFrame: + # tests that don't fit elsewhere + + @pytest.mark.parametrize( + "ser", + [ + Series(range(10), dtype=np.float64), + Series([str(i) for i in range(10)], dtype=object), + ], + ) + def test_squeeze_series_noop(self, ser): + # noop + tm.assert_series_equal(ser.squeeze(), ser) + + def test_squeeze_frame_noop(self): + # noop + df = DataFrame(np.eye(2)) + tm.assert_frame_equal(df.squeeze(), df) + + def test_squeeze_frame_reindex(self): + # squeezing + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).reindex(columns=["A"]) + tm.assert_series_equal(df.squeeze(), df["A"]) + + def test_squeeze_0_len_dim(self): + # don't fail with 0 length dimensions GH11229 & GH8999 + empty_series = Series([], name="five", dtype=np.float64) + empty_frame = DataFrame([empty_series]) + tm.assert_series_equal(empty_series, empty_series.squeeze()) + tm.assert_series_equal(empty_series, empty_frame.squeeze()) + + def test_squeeze_axis(self): + # axis argument + df = DataFrame( + np.random.default_rng(2).standard_normal((1, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=1, freq="B"), + ).iloc[:, :1] + assert df.shape == (1, 1) + tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0]) + tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0]) + tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0]) + tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0]) + assert df.squeeze() == df.iloc[0, 0] + msg = "No axis named 2 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.squeeze(axis=2) + msg = "No axis named x for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.squeeze(axis="x") + + def test_squeeze_axis_len_3(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=3, freq="B"), + ) + tm.assert_frame_equal(df.squeeze(axis=0), df) + + def test_numpy_squeeze(self): + s = Series(range(2), dtype=np.float64) + 
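+        # np.squeeze defers to the object's own ``squeeze`` method, which is a
+        # no-op for 1-D data, so the Series round-trips unchanged.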
tm.assert_series_equal(np.squeeze(s), s) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).reindex(columns=["A"]) + tm.assert_series_equal(np.squeeze(df), df["A"]) + + @pytest.mark.parametrize( + "ser", + [ + Series(range(10), dtype=np.float64), + Series([str(i) for i in range(10)], dtype=object), + ], + ) + def test_transpose_series(self, ser): + # calls implementation in pandas/core/base.py + tm.assert_series_equal(ser.transpose(), ser) + + def test_transpose_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + tm.assert_frame_equal(df.transpose().transpose(), df) + + def test_numpy_transpose(self, frame_or_series): + obj = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + obj = tm.get_obj(obj, frame_or_series) + + if frame_or_series is Series: + # 1D -> np.transpose is no-op + tm.assert_series_equal(np.transpose(obj), obj) + + # round-trip preserved + tm.assert_equal(np.transpose(np.transpose(obj)), obj) + + msg = "the 'axes' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.transpose(obj, axes=1) + + @pytest.mark.parametrize( + "ser", + [ + Series(range(10), dtype=np.float64), + Series([str(i) for i in range(10)], dtype=object), + ], + ) + def test_take_series(self, ser): + indices = [1, 5, -2, 6, 3, -1] + out = ser.take(indices) + expected = Series( + data=ser.values.take(indices), + index=ser.index.take(indices), + dtype=ser.dtype, + ) + tm.assert_series_equal(out, expected) + + def test_take_frame(self): + indices = [1, 5, -2, 6, 3, -1] + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + out = df.take(indices) + expected = DataFrame( + data=df.values.take(indices, axis=0), + index=df.index.take(indices), + columns=df.columns, + ) + tm.assert_frame_equal(out, expected) + + def test_take_invalid_kwargs(self, frame_or_series): + indices = [-3, 2, 0, 1] + + obj = DataFrame(range(5)) + obj = tm.get_obj(obj, frame_or_series) + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + obj.take(indices, foo=2) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + obj.take(indices, out=indices) + + msg = "the 'mode' parameter is not supported" + with pytest.raises(ValueError, match=msg): + obj.take(indices, mode="clip") + + def test_axis_classmethods(self, frame_or_series): + box = frame_or_series + obj = box(dtype=object) + values = box._AXIS_TO_AXIS_NUMBER.keys() + for v in values: + assert obj._get_axis_number(v) == box._get_axis_number(v) + assert obj._get_axis_name(v) == box._get_axis_name(v) + assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v) + + def test_flags_identity(self, frame_or_series): + obj = Series([1, 2]) + if frame_or_series is DataFrame: + obj = obj.to_frame() + + assert obj.flags is obj.flags + obj2 = obj.copy() + assert obj2.flags is not obj.flags + + def test_bool_dep(self) -> None: + # GH-51749 + msg_warn = ( + "DataFrame.bool is now deprecated and will be removed " + "in future version of pandas" + ) + with 
tm.assert_produces_warning(FutureWarning, match=msg_warn): + DataFrame({"col": [False]}).bool() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..97be46f716d7daa98c1c1ebab04e1e6abb3a55bc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py @@ -0,0 +1,336 @@ +import pytest + +from pandas.core.dtypes.missing import array_equivalent + +import pandas as pd + + +# Fixtures +# ======== +@pytest.fixture +def df(): + """DataFrame with columns 'L1', 'L2', and 'L3'""" + return pd.DataFrame({"L1": [1, 2, 3], "L2": [11, 12, 13], "L3": ["A", "B", "C"]}) + + +@pytest.fixture(params=[[], ["L1"], ["L1", "L2"], ["L1", "L2", "L3"]]) +def df_levels(request, df): + """DataFrame with columns or index levels 'L1', 'L2', and 'L3'""" + levels = request.param + + if levels: + df = df.set_index(levels) + + return df + + +@pytest.fixture +def df_ambig(df): + """DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3'""" + df = df.set_index(["L1", "L2"]) + + df["L1"] = df["L3"] + + return df + + +@pytest.fixture +def df_duplabels(df): + """DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2'""" + df = df.set_index(["L1"]) + df = pd.concat([df, df["L2"]], axis=1) + + return df + + +# Test is label/level reference +# ============================= +def get_labels_levels(df_levels): + expected_labels = list(df_levels.columns) + expected_levels = [name for name in df_levels.index.names if name is not None] + return expected_labels, expected_levels + + +def assert_label_reference(frame, labels, axis): + for label in labels: + assert frame._is_label_reference(label, axis=axis) + assert not frame._is_level_reference(label, axis=axis) + assert frame._is_label_or_level_reference(label, axis=axis) + + +def assert_level_reference(frame, levels, axis): + for level in levels: + assert frame._is_level_reference(level, axis=axis) + assert not frame._is_label_reference(level, axis=axis) + assert frame._is_label_or_level_reference(level, axis=axis) + + +# DataFrame +# --------- +def test_is_level_or_label_reference_df_simple(df_levels, axis): + axis = df_levels._get_axis_number(axis) + # Compute expected labels and levels + expected_labels, expected_levels = get_labels_levels(df_levels) + + # Transpose frame if axis == 1 + if axis == 1: + df_levels = df_levels.T + + # Perform checks + assert_level_reference(df_levels, expected_levels, axis=axis) + assert_label_reference(df_levels, expected_labels, axis=axis) + + +def test_is_level_reference_df_ambig(df_ambig, axis): + axis = df_ambig._get_axis_number(axis) + + # Transpose frame if axis == 1 + if axis == 1: + df_ambig = df_ambig.T + + # df has both an on-axis level and off-axis label named L1 + # Therefore L1 should reference the label, not the level + assert_label_reference(df_ambig, ["L1"], axis=axis) + + # df has an on-axis level named L2 and it is not ambiguous + # Therefore L2 is an level reference + assert_level_reference(df_ambig, ["L2"], axis=axis) + + # df has a column named L3 and it not an level reference + assert_label_reference(df_ambig, ["L3"], axis=axis) + + +# Series +# ------ +def test_is_level_reference_series_simple_axis0(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + assert_level_reference(s, ["L1"], axis=0) + assert not s._is_level_reference("L2") + + # Make series 
with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + assert_level_reference(s, ["L1", "L2"], axis=0) + assert not s._is_level_reference("L3") + + +def test_is_level_reference_series_axis1_error(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + + with pytest.raises(ValueError, match="No axis named 1"): + s._is_level_reference("L1", axis=1) + + +# Test _check_label_or_level_ambiguity_df +# ======================================= + + +# DataFrame +# --------- +def test_check_label_or_level_ambiguity_df(df_ambig, axis): + axis = df_ambig._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_ambig = df_ambig.T + msg = "'L1' is both a column level and an index label" + + else: + msg = "'L1' is both an index level and a column label" + # df_ambig has both an on-axis level and off-axis label named L1 + # Therefore, L1 is ambiguous. + with pytest.raises(ValueError, match=msg): + df_ambig._check_label_or_level_ambiguity("L1", axis=axis) + + # df_ambig has an on-axis level named L2,, and it is not ambiguous. + df_ambig._check_label_or_level_ambiguity("L2", axis=axis) + + # df_ambig has an off-axis label named L3, and it is not ambiguous + assert not df_ambig._check_label_or_level_ambiguity("L3", axis=axis) + + +# Series +# ------ +def test_check_label_or_level_ambiguity_series(df): + # A series has no columns and therefore references are never ambiguous + + # Make series with L1 as index + s = df.set_index("L1").L2 + s._check_label_or_level_ambiguity("L1", axis=0) + s._check_label_or_level_ambiguity("L2", axis=0) + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + s._check_label_or_level_ambiguity("L1", axis=0) + s._check_label_or_level_ambiguity("L2", axis=0) + s._check_label_or_level_ambiguity("L3", axis=0) + + +def test_check_label_or_level_ambiguity_series_axis1_error(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + + with pytest.raises(ValueError, match="No axis named 1"): + s._check_label_or_level_ambiguity("L1", axis=1) + + +# Test _get_label_or_level_values +# =============================== +def assert_label_values(frame, labels, axis): + axis = frame._get_axis_number(axis) + for label in labels: + if axis == 0: + expected = frame[label]._values + else: + expected = frame.loc[label]._values + + result = frame._get_label_or_level_values(label, axis=axis) + assert array_equivalent(expected, result) + + +def assert_level_values(frame, levels, axis): + axis = frame._get_axis_number(axis) + for level in levels: + if axis == 0: + expected = frame.index.get_level_values(level=level)._values + else: + expected = frame.columns.get_level_values(level=level)._values + + result = frame._get_label_or_level_values(level, axis=axis) + assert array_equivalent(expected, result) + + +# DataFrame +# --------- +def test_get_label_or_level_values_df_simple(df_levels, axis): + # Compute expected labels and levels + expected_labels, expected_levels = get_labels_levels(df_levels) + + axis = df_levels._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_levels = df_levels.T + + # Perform checks + assert_label_values(df_levels, expected_labels, axis=axis) + assert_level_values(df_levels, expected_levels, axis=axis) + + +def test_get_label_or_level_values_df_ambig(df_ambig, axis): + axis = df_ambig._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_ambig = df_ambig.T + + # df has an on-axis level named L2, and it is not ambiguous. 
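+    # 'L1' is both a level and a label on df_ambig, so only the unambiguous
+    # names are exercised here.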
+ assert_level_values(df_ambig, ["L2"], axis=axis) + + # df has an off-axis label named L3, and it is not ambiguous. + assert_label_values(df_ambig, ["L3"], axis=axis) + + +def test_get_label_or_level_values_df_duplabels(df_duplabels, axis): + axis = df_duplabels._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_duplabels = df_duplabels.T + + # df has unambiguous level 'L1' + assert_level_values(df_duplabels, ["L1"], axis=axis) + + # df has unique label 'L3' + assert_label_values(df_duplabels, ["L3"], axis=axis) + + # df has duplicate labels 'L2' + if axis == 0: + expected_msg = "The column label 'L2' is not unique" + else: + expected_msg = "The index label 'L2' is not unique" + + with pytest.raises(ValueError, match=expected_msg): + assert_label_values(df_duplabels, ["L2"], axis=axis) + + +# Series +# ------ +def test_get_label_or_level_values_series_axis0(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + assert_level_values(s, ["L1"], axis=0) + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + assert_level_values(s, ["L1", "L2"], axis=0) + + +def test_get_label_or_level_values_series_axis1_error(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + + with pytest.raises(ValueError, match="No axis named 1"): + s._get_label_or_level_values("L1", axis=1) + + +# Test _drop_labels_or_levels +# =========================== +def assert_labels_dropped(frame, labels, axis): + axis = frame._get_axis_number(axis) + for label in labels: + df_dropped = frame._drop_labels_or_levels(label, axis=axis) + + if axis == 0: + assert label in frame.columns + assert label not in df_dropped.columns + else: + assert label in frame.index + assert label not in df_dropped.index + + +def assert_levels_dropped(frame, levels, axis): + axis = frame._get_axis_number(axis) + for level in levels: + df_dropped = frame._drop_labels_or_levels(level, axis=axis) + + if axis == 0: + assert level in frame.index.names + assert level not in df_dropped.index.names + else: + assert level in frame.columns.names + assert level not in df_dropped.columns.names + + +# DataFrame +# --------- +def test_drop_labels_or_levels_df(df_levels, axis): + # Compute expected labels and levels + expected_labels, expected_levels = get_labels_levels(df_levels) + + axis = df_levels._get_axis_number(axis) + # Transpose frame if axis == 1 + if axis == 1: + df_levels = df_levels.T + + # Perform checks + assert_labels_dropped(df_levels, expected_labels, axis=axis) + assert_levels_dropped(df_levels, expected_levels, axis=axis) + + with pytest.raises(ValueError, match="not valid labels or levels"): + df_levels._drop_labels_or_levels("L4", axis=axis) + + +# Series +# ------ +def test_drop_labels_or_levels_series(df): + # Make series with L1 as index + s = df.set_index("L1").L2 + assert_levels_dropped(s, ["L1"], axis=0) + + with pytest.raises(ValueError, match="not valid labels or levels"): + s._drop_labels_or_levels("L4", axis=0) + + # Make series with L1 and L2 as index + s = df.set_index(["L1", "L2"]).L3 + assert_levels_dropped(s, ["L1", "L2"], axis=0) + + with pytest.raises(ValueError, match="not valid labels or levels"): + s._drop_labels_or_levels("L4", axis=0) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_series.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_series.py new file mode 100644 index 0000000000000000000000000000000000000000..3648961eb3808a316b2a23d3d720fdd26fe7fd06 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_series.py @@ -0,0 +1,159 @@ +from operator import methodcaller + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + + +class TestSeries: + @pytest.mark.parametrize("func", ["rename_axis", "_set_axis_name"]) + def test_set_axis_name_mi(self, func): + ser = Series( + [11, 21, 31], + index=MultiIndex.from_tuples( + [("A", x) for x in ["a", "B", "c"]], names=["l1", "l2"] + ), + ) + + result = methodcaller(func, ["L1", "L2"])(ser) + assert ser.index.name is None + assert ser.index.names == ["l1", "l2"] + assert result.index.name is None + assert result.index.names, ["L1", "L2"] + + def test_set_axis_name_raises(self): + ser = Series([1]) + msg = "No axis named 1 for object type Series" + with pytest.raises(ValueError, match=msg): + ser._set_axis_name(name="a", axis=1) + + def test_get_bool_data_preserve_dtype(self): + ser = Series([True, False, True]) + result = ser._get_bool_data() + tm.assert_series_equal(result, ser) + + def test_nonzero_single_element(self): + # allow single item via bool method + msg_warn = ( + "Series.bool is now deprecated and will be removed " + "in future version of pandas" + ) + ser = Series([True]) + ser1 = Series([False]) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert ser.bool() + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + assert not ser1.bool() + + @pytest.mark.parametrize("data", [np.nan, pd.NaT, True, False]) + def test_nonzero_single_element_raise_1(self, data): + # single item nan to raise + series = Series([data]) + + msg = "The truth value of a Series is ambiguous" + with pytest.raises(ValueError, match=msg): + bool(series) + + @pytest.mark.parametrize("data", [np.nan, pd.NaT]) + def test_nonzero_single_element_raise_2(self, data): + msg_warn = ( + "Series.bool is now deprecated and will be removed " + "in future version of pandas" + ) + msg_err = "bool cannot act on a non-boolean single element Series" + series = Series([data]) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + with pytest.raises(ValueError, match=msg_err): + series.bool() + + @pytest.mark.parametrize("data", [(True, True), (False, False)]) + def test_nonzero_multiple_element_raise(self, data): + # multiple bool are still an error + msg_warn = ( + "Series.bool is now deprecated and will be removed " + "in future version of pandas" + ) + msg_err = "The truth value of a Series is ambiguous" + series = Series([data]) + with pytest.raises(ValueError, match=msg_err): + bool(series) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + with pytest.raises(ValueError, match=msg_err): + series.bool() + + @pytest.mark.parametrize("data", [1, 0, "a", 0.0]) + def test_nonbool_single_element_raise(self, data): + # single non-bool are an error + msg_warn = ( + "Series.bool is now deprecated and will be removed " + "in future version of pandas" + ) + msg_err1 = "The truth value of a Series is ambiguous" + msg_err2 = "bool cannot act on a non-boolean single element Series" + series = Series([data]) + with pytest.raises(ValueError, match=msg_err1): + bool(series) + with tm.assert_produces_warning(FutureWarning, match=msg_warn): + with pytest.raises(ValueError, match=msg_err2): + series.bool() + + def test_metadata_propagation_indiv_resample(self): + # resample + ts = Series( + np.random.default_rng(2).random(1000), + index=date_range("20130101", periods=1000, 
freq="s"), + name="foo", + ) + result = ts.resample("1min").mean() + tm.assert_metadata_equivalent(ts, result) + + result = ts.resample("1min").min() + tm.assert_metadata_equivalent(ts, result) + + result = ts.resample("1min").apply(lambda x: x.sum()) + tm.assert_metadata_equivalent(ts, result) + + def test_metadata_propagation_indiv(self, monkeypatch): + # check that the metadata matches up on the resulting ops + + ser = Series(range(3), range(3)) + ser.name = "foo" + ser2 = Series(range(3), range(3)) + ser2.name = "bar" + + result = ser.T + tm.assert_metadata_equivalent(ser, result) + + def finalize(self, other, method=None, **kwargs): + for name in self._metadata: + if method == "concat" and name == "filename": + value = "+".join( + [ + getattr(obj, name) + for obj in other.objs + if getattr(obj, name, None) + ] + ) + object.__setattr__(self, name, value) + else: + object.__setattr__(self, name, getattr(other, name, None)) + + return self + + with monkeypatch.context() as m: + m.setattr(Series, "_metadata", ["name", "filename"]) + m.setattr(Series, "__finalize__", finalize) + + ser.filename = "foo" + ser2.filename = "bar" + + result = pd.concat([ser, ser2]) + assert result.filename == "foo+bar" + assert result.name is None diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py new file mode 100644 index 0000000000000000000000000000000000000000..d8401a8b2ae3f3b885374375b64910e166dbe525 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py @@ -0,0 +1,130 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm + +pytest.importorskip("xarray") + + +class TestDataFrameToXArray: + @pytest.fixture + def df(self): + return DataFrame( + { + "a": list("abcd"), + "b": list(range(1, 5)), + "c": np.arange(3, 7).astype("u1"), + "d": np.arange(4.0, 8.0, dtype="float64"), + "e": [True, False, True, False], + "f": Categorical(list("abcd")), + "g": date_range("20130101", periods=4), + "h": date_range("20130101", periods=4, tz="US/Eastern"), + } + ) + + def test_to_xarray_index_types(self, index_flat, df, using_infer_string): + index = index_flat + # MultiIndex is tested in test_to_xarray_with_multiindex + if len(index) == 0: + pytest.skip("Test doesn't make sense for empty index") + + from xarray import Dataset + + df.index = index[:4] + df.index.name = "foo" + df.columns.name = "bar" + result = df.to_xarray() + assert result.sizes["foo"] == 4 + assert len(result.coords) == 1 + assert len(result.data_vars) == 8 + tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) + assert isinstance(result, Dataset) + + # idempotency + # datetimes w/tz are preserved + # column names are lost + expected = df.copy() + expected["f"] = expected["f"].astype( + object if not using_infer_string else "string[pyarrow_numpy]" + ) + expected.columns.name = None + tm.assert_frame_equal(result.to_dataframe(), expected) + + def test_to_xarray_empty(self, df): + from xarray import Dataset + + df.index.name = "foo" + result = df[0:0].to_xarray() + assert result.sizes["foo"] == 0 + assert isinstance(result, Dataset) + + def test_to_xarray_with_multiindex(self, df, using_infer_string): + from xarray import Dataset + + # MultiIndex + df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"]) + result = df.to_xarray() + assert result.sizes["one"] == 
1 + assert result.sizes["two"] == 4 + assert len(result.coords) == 2 + assert len(result.data_vars) == 8 + tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"]) + assert isinstance(result, Dataset) + + result = result.to_dataframe() + expected = df.copy() + expected["f"] = expected["f"].astype( + object if not using_infer_string else "string[pyarrow_numpy]" + ) + expected.columns.name = None + tm.assert_frame_equal(result, expected) + + +class TestSeriesToXArray: + def test_to_xarray_index_types(self, index_flat): + index = index_flat + # MultiIndex is tested in test_to_xarray_with_multiindex + + from xarray import DataArray + + ser = Series(range(len(index)), index=index, dtype="int64") + ser.index.name = "foo" + result = ser.to_xarray() + repr(result) + assert len(result) == len(index) + assert len(result.coords) == 1 + tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) + assert isinstance(result, DataArray) + + # idempotency + tm.assert_series_equal(result.to_series(), ser) + + def test_to_xarray_empty(self): + from xarray import DataArray + + ser = Series([], dtype=object) + ser.index.name = "foo" + result = ser.to_xarray() + assert len(result) == 0 + assert len(result.coords) == 1 + tm.assert_almost_equal(list(result.coords.keys()), ["foo"]) + assert isinstance(result, DataArray) + + def test_to_xarray_with_multiindex(self): + from xarray import DataArray + + mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"]) + ser = Series(range(6), dtype="int64", index=mi) + result = ser.to_xarray() + assert len(result) == 2 + tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"]) + assert isinstance(result, DataArray) + res = result.to_series() + tm.assert_series_equal(res, ser) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01ab303cc60c278f17ad97b38c97872d3a292bba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a01bf93143fce7e96f833417f36c875b33eb690d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad9b213bc67fb2069a6d97ba5970f5adb3441634 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c56c909770b055abff94781524b13427602cbacc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccb40fb3bf2ea8fe1c44b94707348e9fc9f97470 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cca2fcb304ca5f4ca00ea7aa52d8ac98bed27152 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17bfc01c23b58aacd4e26384ba7870307d6928f2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..313107172d67af4d0eeffacaecd1c48ca4d9e165 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/common.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/common.py new file mode 100644 index 0000000000000000000000000000000000000000..69120160699c24cc86670522f84ec6c7014c20ee --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/common.py @@ -0,0 +1,563 @@ +""" +Module consolidating common testing functions for checking plotting. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.api import is_list_like + +import pandas as pd +from pandas import Series +import pandas._testing as tm + +if TYPE_CHECKING: + from collections.abc import Sequence + + from matplotlib.axes import Axes + + +def _check_legend_labels(axes, labels=None, visible=True): + """ + Check each axes has expected legend labels + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + labels : list-like + expected legend labels + visible : bool + expected legend visibility. 
labels are checked only when visible is + True + """ + if visible and (labels is None): + raise ValueError("labels must be specified when visible is True") + axes = _flatten_visible(axes) + for ax in axes: + if visible: + assert ax.get_legend() is not None + _check_text_labels(ax.get_legend().get_texts(), labels) + else: + assert ax.get_legend() is None + + +def _check_legend_marker(ax, expected_markers=None, visible=True): + """ + Check ax has expected legend markers + + Parameters + ---------- + ax : matplotlib Axes object + expected_markers : list-like + expected legend markers + visible : bool + expected legend visibility. labels are checked only when visible is + True + """ + if visible and (expected_markers is None): + raise ValueError("Markers must be specified when visible is True") + if visible: + handles, _ = ax.get_legend_handles_labels() + markers = [handle.get_marker() for handle in handles] + assert markers == expected_markers + else: + assert ax.get_legend() is None + + +def _check_data(xp, rs): + """ + Check each axes has identical lines + + Parameters + ---------- + xp : matplotlib Axes object + rs : matplotlib Axes object + """ + import matplotlib.pyplot as plt + + xp_lines = xp.get_lines() + rs_lines = rs.get_lines() + + assert len(xp_lines) == len(rs_lines) + for xpl, rsl in zip(xp_lines, rs_lines): + xpdata = xpl.get_xydata() + rsdata = rsl.get_xydata() + tm.assert_almost_equal(xpdata, rsdata) + + plt.close("all") + + +def _check_visible(collections, visible=True): + """ + Check each artist is visible or not + + Parameters + ---------- + collections : matplotlib Artist or its list-like + target Artist or its list or collection + visible : bool + expected visibility + """ + from matplotlib.collections import Collection + + if not isinstance(collections, Collection) and not is_list_like(collections): + collections = [collections] + + for patch in collections: + assert patch.get_visible() == visible + + +def _check_patches_all_filled(axes: Axes | Sequence[Axes], filled: bool = True) -> None: + """ + Check for each artist whether it is filled or not + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + filled : bool + expected filling + """ + + axes = _flatten_visible(axes) + for ax in axes: + for patch in ax.patches: + assert patch.fill == filled + + +def _get_colors_mapped(series, colors): + unique = series.unique() + # unique and colors length can be differed + # depending on slice value + mapped = dict(zip(unique, colors)) + return [mapped[v] for v in series.values] + + +def _check_colors(collections, linecolors=None, facecolors=None, mapping=None): + """ + Check each artist has expected line colors and face colors + + Parameters + ---------- + collections : list-like + list or collection of target artist + linecolors : list-like which has the same length as collections + list of expected line colors + facecolors : list-like which has the same length as collections + list of expected face colors + mapping : Series + Series used for color grouping key + used for andrew_curves, parallel_coordinates, radviz test + """ + from matplotlib import colors + from matplotlib.collections import ( + Collection, + LineCollection, + PolyCollection, + ) + from matplotlib.lines import Line2D + + conv = colors.ColorConverter + if linecolors is not None: + if mapping is not None: + linecolors = _get_colors_mapped(mapping, linecolors) + linecolors = linecolors[: len(collections)] + + assert len(collections) == len(linecolors) + for patch, color in 
zip(collections, linecolors): + if isinstance(patch, Line2D): + result = patch.get_color() + # Line2D may contains string color expression + result = conv.to_rgba(result) + elif isinstance(patch, (PolyCollection, LineCollection)): + result = tuple(patch.get_edgecolor()[0]) + else: + result = patch.get_edgecolor() + + expected = conv.to_rgba(color) + assert result == expected + + if facecolors is not None: + if mapping is not None: + facecolors = _get_colors_mapped(mapping, facecolors) + facecolors = facecolors[: len(collections)] + + assert len(collections) == len(facecolors) + for patch, color in zip(collections, facecolors): + if isinstance(patch, Collection): + # returned as list of np.array + result = patch.get_facecolor()[0] + else: + result = patch.get_facecolor() + + if isinstance(result, np.ndarray): + result = tuple(result) + + expected = conv.to_rgba(color) + assert result == expected + + +def _check_text_labels(texts, expected): + """ + Check each text has expected labels + + Parameters + ---------- + texts : matplotlib Text object, or its list-like + target text, or its list + expected : str or list-like which has the same length as texts + expected text label, or its list + """ + if not is_list_like(texts): + assert texts.get_text() == expected + else: + labels = [t.get_text() for t in texts] + assert len(labels) == len(expected) + for label, e in zip(labels, expected): + assert label == e + + +def _check_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None): + """ + Check each axes has expected tick properties + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xlabelsize : number + expected xticks font size + xrot : number + expected xticks rotation + ylabelsize : number + expected yticks font size + yrot : number + expected yticks rotation + """ + from matplotlib.ticker import NullFormatter + + axes = _flatten_visible(axes) + for ax in axes: + if xlabelsize is not None or xrot is not None: + if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter): + # If minor ticks has NullFormatter, rot / fontsize are not + # retained + labels = ax.get_xticklabels() + else: + labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True) + + for label in labels: + if xlabelsize is not None: + tm.assert_almost_equal(label.get_fontsize(), xlabelsize) + if xrot is not None: + tm.assert_almost_equal(label.get_rotation(), xrot) + + if ylabelsize is not None or yrot is not None: + if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter): + labels = ax.get_yticklabels() + else: + labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True) + + for label in labels: + if ylabelsize is not None: + tm.assert_almost_equal(label.get_fontsize(), ylabelsize) + if yrot is not None: + tm.assert_almost_equal(label.get_rotation(), yrot) + + +def _check_ax_scales(axes, xaxis="linear", yaxis="linear"): + """ + Check each axes has expected scales + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xaxis : {'linear', 'log'} + expected xaxis scale + yaxis : {'linear', 'log'} + expected yaxis scale + """ + axes = _flatten_visible(axes) + for ax in axes: + assert ax.xaxis.get_scale() == xaxis + assert ax.yaxis.get_scale() == yaxis + + +def _check_axes_shape(axes, axes_num=None, layout=None, figsize=None): + """ + Check expected number of axes is drawn in expected layout + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + axes_num : number + expected number of axes. 
Unnecessary axes should be set to + invisible. + layout : tuple + expected layout, (expected number of rows , columns) + figsize : tuple + expected figsize. default is matplotlib default + """ + from pandas.plotting._matplotlib.tools import flatten_axes + + if figsize is None: + figsize = (6.4, 4.8) + visible_axes = _flatten_visible(axes) + + if axes_num is not None: + assert len(visible_axes) == axes_num + for ax in visible_axes: + # check something drawn on visible axes + assert len(ax.get_children()) > 0 + + if layout is not None: + x_set = set() + y_set = set() + for ax in flatten_axes(axes): + # check axes coordinates to estimate layout + points = ax.get_position().get_points() + x_set.add(points[0][0]) + y_set.add(points[0][1]) + result = (len(y_set), len(x_set)) + assert result == layout + + tm.assert_numpy_array_equal( + visible_axes[0].figure.get_size_inches(), + np.array(figsize, dtype=np.float64), + ) + + +def _flatten_visible(axes: Axes | Sequence[Axes]) -> Sequence[Axes]: + """ + Flatten axes, and filter only visible + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + + """ + from pandas.plotting._matplotlib.tools import flatten_axes + + axes_ndarray = flatten_axes(axes) + axes = [ax for ax in axes_ndarray if ax.get_visible()] + return axes + + +def _check_has_errorbars(axes, xerr=0, yerr=0): + """ + Check axes has expected number of errorbars + + Parameters + ---------- + axes : matplotlib Axes object, or its list-like + xerr : number + expected number of x errorbar + yerr : number + expected number of y errorbar + """ + axes = _flatten_visible(axes) + for ax in axes: + containers = ax.containers + xerr_count = 0 + yerr_count = 0 + for c in containers: + has_xerr = getattr(c, "has_xerr", False) + has_yerr = getattr(c, "has_yerr", False) + if has_xerr: + xerr_count += 1 + if has_yerr: + yerr_count += 1 + assert xerr == xerr_count + assert yerr == yerr_count + + +def _check_box_return_type( + returned, return_type, expected_keys=None, check_ax_title=True +): + """ + Check box returned type is correct + + Parameters + ---------- + returned : object to be tested, returned from boxplot + return_type : str + return_type passed to boxplot + expected_keys : list-like, optional + group labels in subplot case. If not passed, + the function checks assuming boxplot uses single ax + check_ax_title : bool + Whether to check the ax.title is the same as expected_key + Intended to be checked by calling from ``boxplot``. + Normal ``plot`` doesn't attach ``ax.title``, it must be disabled. 
+ """ + from matplotlib.axes import Axes + + types = {"dict": dict, "axes": Axes, "both": tuple} + if expected_keys is None: + # should be fixed when the returning default is changed + if return_type is None: + return_type = "dict" + + assert isinstance(returned, types[return_type]) + if return_type == "both": + assert isinstance(returned.ax, Axes) + assert isinstance(returned.lines, dict) + else: + # should be fixed when the returning default is changed + if return_type is None: + for r in _flatten_visible(returned): + assert isinstance(r, Axes) + return + + assert isinstance(returned, Series) + + assert sorted(returned.keys()) == sorted(expected_keys) + for key, value in returned.items(): + assert isinstance(value, types[return_type]) + # check returned dict has correct mapping + if return_type == "axes": + if check_ax_title: + assert value.get_title() == key + elif return_type == "both": + if check_ax_title: + assert value.ax.get_title() == key + assert isinstance(value.ax, Axes) + assert isinstance(value.lines, dict) + elif return_type == "dict": + line = value["medians"][0] + axes = line.axes + if check_ax_title: + assert axes.get_title() == key + else: + raise AssertionError + + +def _check_grid_settings(obj, kinds, kws={}): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + + import matplotlib as mpl + + def is_grid_on(): + xticks = mpl.pyplot.gca().xaxis.get_major_ticks() + yticks = mpl.pyplot.gca().yaxis.get_major_ticks() + xoff = all(not g.gridline.get_visible() for g in xticks) + yoff = all(not g.gridline.get_visible() for g in yticks) + + return not (xoff and yoff) + + spndx = 1 + for kind in kinds: + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=False) + obj.plot(kind=kind, **kws) + assert not is_grid_on() + mpl.pyplot.clf() + + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=True) + obj.plot(kind=kind, grid=False, **kws) + assert not is_grid_on() + mpl.pyplot.clf() + + if kind not in ["pie", "hexbin", "scatter"]: + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=True) + obj.plot(kind=kind, **kws) + assert is_grid_on() + mpl.pyplot.clf() + + mpl.pyplot.subplot(1, 4 * len(kinds), spndx) + spndx += 1 + mpl.rc("axes", grid=False) + obj.plot(kind=kind, grid=True, **kws) + assert is_grid_on() + mpl.pyplot.clf() + + +def _unpack_cycler(rcParams, field="color"): + """ + Auxiliary function for correctly unpacking cycler after MPL >= 1.5 + """ + return [v[field] for v in rcParams["axes.prop_cycle"]] + + +def get_x_axis(ax): + return ax._shared_axes["x"] + + +def get_y_axis(ax): + return ax._shared_axes["y"] + + +def _check_plot_works(f, default_axes=False, **kwargs): + """ + Create plot and ensure that plot return object is valid. + + Parameters + ---------- + f : func + Plotting function. + default_axes : bool, optional + If False (default): + - If `ax` not in `kwargs`, then create subplot(211) and plot there + - Create new subplot(212) and plot there as well + - Mind special corner case for bootstrap_plot (see `_gen_two_subplots`) + If True: + - Simply run plotting function with kwargs provided + - All required axes instances will be created automatically + - It is recommended to use it when the plotting function + creates multiple axes itself. It helps avoid warnings like + 'UserWarning: To output multiple subplots, + the figure containing the passed axes is being cleared' + **kwargs + Keyword arguments passed to the plotting function. 
+ + Returns + ------- + Plot object returned by the last plotting. + """ + import matplotlib.pyplot as plt + + if default_axes: + gen_plots = _gen_default_plot + else: + gen_plots = _gen_two_subplots + + ret = None + try: + fig = kwargs.get("figure", plt.gcf()) + plt.clf() + + for ret in gen_plots(f, fig, **kwargs): + tm.assert_is_valid_plot_return_object(ret) + + finally: + plt.close(fig) + + return ret + + +def _gen_default_plot(f, fig, **kwargs): + """ + Create plot in a default way. + """ + yield f(**kwargs) + + +def _gen_two_subplots(f, fig, **kwargs): + """ + Create plot on two subplots forcefully created. + """ + if "ax" not in kwargs: + fig.add_subplot(211) + yield f(**kwargs) + + if f is pd.plotting.bootstrap_plot: + assert "ax" not in kwargs + else: + kwargs["ax"] = fig.add_subplot(212) + yield f(**kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..d688bbd47595c2ec6451bd9ddf7c916275013384 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py @@ -0,0 +1,56 @@ +import gc + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + to_datetime, +) + + +@pytest.fixture(autouse=True) +def mpl_cleanup(): + # matplotlib/testing/decorators.py#L24 + # 1) Resets units registry + # 2) Resets rc_context + # 3) Closes all figures + mpl = pytest.importorskip("matplotlib") + mpl_units = pytest.importorskip("matplotlib.units") + plt = pytest.importorskip("matplotlib.pyplot") + orig_units_registry = mpl_units.registry.copy() + with mpl.rc_context(): + mpl.use("template") + yield + mpl_units.registry.clear() + mpl_units.registry.update(orig_units_registry) + plt.close("all") + # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501 + gc.collect(1) + + +@pytest.fixture +def hist_df(): + n = 50 + rng = np.random.default_rng(10) + gender = rng.choice(["Male", "Female"], size=n) + classroom = rng.choice(["A", "B", "C"], size=n) + + hist_df = DataFrame( + { + "gender": gender, + "classroom": classroom, + "height": rng.normal(66, 4, size=n), + "weight": rng.normal(161, 32, size=n), + "category": rng.integers(4, size=n), + "datetime": to_datetime( + rng.integers( + 812419200000000000, + 819331200000000000, + size=n, + dtype=np.int64, + ) + ), + } + ) + return hist_df diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6ce6a3f3fcde0b8a7c1ca8f2e1e3467036c6275 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d427462d66e9520da79e5c4441d8b97d044289ef Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30240f0d1812aeba45a31c263c50ac7efce4e4d2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68d8c47fb4bad8ef436e650ed35f03c7edf48511 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5af9116ab3362f76797f39637a7c967938e173cb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a17634e6fae62c7a11f7082b03bc54d9ae0f7838 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a850d7de86071698d76bdef86a7e0e4c0adf5cc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py new file mode 100644 index 0000000000000000000000000000000000000000..45dc612148f40ea29c7fac46b6b9d8edd29b17fb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py @@ -0,0 +1,2592 @@ +""" Test cases for DataFrame.plot """ +from datetime import ( + date, + datetime, +) +import gc +import itertools +import re +import string +import weakref + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.api import is_list_like + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + 
PeriodIndex, + Series, + bdate_range, + date_range, + option_context, + plotting, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_ax_scales, + _check_axes_shape, + _check_box_return_type, + _check_colors, + _check_data, + _check_grid_settings, + _check_has_errorbars, + _check_legend_labels, + _check_plot_works, + _check_text_labels, + _check_ticks_props, + _check_visible, + get_y_axis, +) + +from pandas.io.formats.printing import pprint_thing + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestDataFramePlots: + @pytest.mark.slow + def test_plot(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + _check_plot_works(df.plot, grid=False) + + @pytest.mark.slow + def test_plot_subplots(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + # _check_plot_works adds an ax so use default_axes=True to avoid warning + axes = _check_plot_works(df.plot, default_axes=True, subplots=True) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + + @pytest.mark.slow + def test_plot_subplots_negative_layout(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + axes = _check_plot_works( + df.plot, + default_axes=True, + subplots=True, + layout=(-1, 2), + ) + _check_axes_shape(axes, axes_num=4, layout=(2, 2)) + + @pytest.mark.slow + def test_plot_subplots_use_index(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + axes = _check_plot_works( + df.plot, + default_axes=True, + subplots=True, + use_index=False, + ) + _check_ticks_props(axes, xrot=0) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + @pytest.mark.slow + def test_plot_invalid_arg(self): + df = DataFrame({"x": [1, 2], "y": [3, 4]}) + msg = "'Line2D' object has no property 'blarg'" + with pytest.raises(AttributeError, match=msg): + df.plot.line(blarg=True) + + @pytest.mark.slow + def test_plot_tick_props(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + ax = _check_plot_works(df.plot, use_index=True) + _check_ticks_props(ax, xrot=0) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", + [ + {"yticks": [1, 5, 10]}, + {"xticks": [1, 5, 10]}, + {"ylim": (-100, 100), "xlim": (-100, 100)}, + {"default_axes": True, "subplots": True, "title": "blah"}, + ], + ) + def test_plot_other_args(self, kwargs): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + _check_plot_works(df.plot, **kwargs) + + @pytest.mark.slow + def test_plot_visible_ax(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + # We have to redo it here because _check_plot_works does two plots, + # once without an ax kwarg and once with an ax kwarg and the new sharex + # behaviour does not remove the visibility of the latter axis (as ax is + # present). 
see: https://github.com/pandas-dev/pandas/issues/9737 + + axes = df.plot(subplots=True, title="blah") + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + for ax in axes[:2]: + _check_visible(ax.xaxis) # xaxis must be visible for grid + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + _check_visible([ax.xaxis.get_label()], visible=False) + for ax in [axes[2]]: + _check_visible(ax.xaxis) + _check_visible(ax.get_xticklabels()) + _check_visible([ax.xaxis.get_label()]) + _check_ticks_props(ax, xrot=0) + + @pytest.mark.slow + def test_plot_title(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + _check_plot_works(df.plot, title="blah") + + @pytest.mark.slow + def test_plot_multiindex(self): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + ax = _check_plot_works(df.plot, use_index=True) + _check_ticks_props(ax, xrot=0) + + @pytest.mark.slow + def test_plot_multiindex_unicode(self): + # unicode + index = MultiIndex.from_tuples( + [ + ("\u03b1", 0), + ("\u03b1", 1), + ("\u03b2", 2), + ("\u03b2", 3), + ("\u03b3", 4), + ("\u03b3", 5), + ("\u03b4", 6), + ("\u03b4", 7), + ], + names=["i0", "i1"], + ) + columns = MultiIndex.from_tuples( + [("bar", "\u0394"), ("bar", "\u0395")], names=["c0", "c1"] + ) + df = DataFrame( + np.random.default_rng(2).integers(0, 10, (8, 2)), + columns=columns, + index=index, + ) + _check_plot_works(df.plot, title="\u03A3") + + @pytest.mark.slow + @pytest.mark.parametrize("layout", [None, (-1, 1)]) + def test_plot_single_column_bar(self, layout): + # GH 6951 + # Test with single column + df = DataFrame({"x": np.random.default_rng(2).random(10)}) + axes = _check_plot_works(df.plot.bar, subplots=True, layout=layout) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.slow + def test_plot_passed_ax(self): + # When ax is supplied and required number of axes is 1, + # passed ax should be used: + df = DataFrame({"x": np.random.default_rng(2).random(10)}) + _, ax = mpl.pyplot.subplots() + axes = df.plot.bar(subplots=True, ax=ax) + assert len(axes) == 1 + result = ax.axes + assert result is axes[0] + + @pytest.mark.parametrize( + "cols, x, y", + [ + [list("ABCDE"), "A", "B"], + [["A", "B"], "A", "B"], + [["C", "A"], "C", "A"], + [["A", "C"], "A", "C"], + [["B", "C"], "B", "C"], + [["A", "D"], "A", "D"], + [["A", "E"], "A", "E"], + ], + ) + def test_nullable_int_plot(self, cols, x, y): + # GH 32073 + dates = ["2008", "2009", None, "2011", "2012"] + df = DataFrame( + { + "A": [1, 2, 3, 4, 5], + "B": [1, 2, 3, 4, 5], + "C": np.array([7, 5, np.nan, 3, 2], dtype=object), + "D": pd.to_datetime(dates, format="%Y").view("i8"), + "E": pd.to_datetime(dates, format="%Y", utc=True).view("i8"), + } + ) + + _check_plot_works(df[cols].plot, x=x, y=y) + + @pytest.mark.slow + @pytest.mark.parametrize("plot", ["line", "bar", "hist", "pie"]) + def test_integer_array_plot_series(self, plot): + # GH 25587 + arr = pd.array([1, 2, 3, 4], dtype="UInt32") + + s = Series(arr) + _check_plot_works(getattr(s.plot, plot)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "plot, kwargs", + [ + ["line", {}], + ["bar", {}], + ["hist", {}], + ["pie", {"y": "y"}], + ["scatter", {"x": "x", "y": "y"}], + ["hexbin", {"x": "x", "y": "y"}], + ], + ) + def test_integer_array_plot_df(self, plot, kwargs): + # GH 25587 + arr = pd.array([1, 2, 3, 4], 
dtype="UInt32") + df = DataFrame({"x": arr, "y": arr}) + _check_plot_works(getattr(df.plot, plot), **kwargs) + + def test_nonnumeric_exclude(self): + df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}) + ax = df.plot() + assert len(ax.get_lines()) == 1 # B was plotted + + def test_implicit_label(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"] + ) + ax = df.plot(x="a", y="b") + _check_text_labels(ax.xaxis.get_label(), "a") + + def test_donot_overwrite_index_name(self): + # GH 8494 + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 2)), columns=["a", "b"] + ) + df.index.name = "NAME" + df.plot(y="b", label="LABEL") + assert df.index.name == "NAME" + + def test_plot_xy(self): + # columns.inferred_type == 'string' + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + _check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot()) + _check_data(df.plot(x=0), df.set_index("A").plot()) + _check_data(df.plot(y=0), df.B.plot()) + _check_data(df.plot(x="A", y="B"), df.set_index("A").B.plot()) + _check_data(df.plot(x="A"), df.set_index("A").plot()) + _check_data(df.plot(y="B"), df.B.plot()) + + def test_plot_xy_int_cols(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + # columns.inferred_type == 'integer' + df.columns = np.arange(1, len(df.columns) + 1) + _check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot()) + _check_data(df.plot(x=1), df.set_index(1).plot()) + _check_data(df.plot(y=1), df[1].plot()) + + def test_plot_xy_figsize_and_title(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + # figsize and title + ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8)) + _check_text_labels(ax.title, "Test") + _check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0)) + + # columns.inferred_type == 'mixed' + # TODO add MultiIndex test + + @pytest.mark.parametrize( + "input_log, expected_log", [(True, "log"), ("sym", "symlog")] + ) + def test_logscales(self, input_log, expected_log): + df = DataFrame({"a": np.arange(100)}, index=np.arange(100)) + + ax = df.plot(logy=input_log) + _check_ax_scales(ax, yaxis=expected_log) + assert ax.get_yscale() == expected_log + + ax = df.plot(logx=input_log) + _check_ax_scales(ax, xaxis=expected_log) + assert ax.get_xscale() == expected_log + + ax = df.plot(loglog=input_log) + _check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log) + assert ax.get_xscale() == expected_log + assert ax.get_yscale() == expected_log + + @pytest.mark.parametrize("input_param", ["logx", "logy", "loglog"]) + def test_invalid_logscale(self, input_param): + # GH: 24867 + df = DataFrame({"a": np.arange(100)}, index=np.arange(100)) + + msg = f"keyword '{input_param}' should be bool, None, or 'sym', not 'sm'" + with pytest.raises(ValueError, match=msg): + df.plot(**{input_param: "sm"}) + + msg = f"PiePlot ignores the '{input_param}' keyword" + with tm.assert_produces_warning(UserWarning, match=msg): + df.plot.pie(subplots=True, **{input_param: True}) + + def test_xcompat(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, 
freq="B"), + ) + ax = df.plot(x_compat=True) + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + _check_ticks_props(ax, xrot=30) + + def test_xcompat_plot_params(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + plotting.plot_params["xaxis.compat"] = True + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + _check_ticks_props(ax, xrot=30) + + def test_xcompat_plot_params_x_compat(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + plotting.plot_params["x_compat"] = False + + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex) + + def test_xcompat_plot_params_context_manager(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + # useful if you're plotting a bunch together + with plotting.plot_params.use("x_compat", True): + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + _check_ticks_props(ax, xrot=30) + + def test_xcompat_plot_period(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + msg = r"PeriodDtype\[B\] is deprecated " + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex) + _check_ticks_props(ax, xrot=0) + + def test_period_compat(self): + # GH 9012 + # period-array conversions + df = DataFrame( + np.random.default_rng(2).random((21, 2)), + index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)), + columns=["a", "b"], + ) + + df.plot() + mpl.pyplot.axhline(y=0) + + @pytest.mark.parametrize("index_dtype", [np.int64, np.float64]) + def test_unsorted_index(self, index_dtype): + df = DataFrame( + {"y": np.arange(100)}, + index=Index(np.arange(99, -1, -1), dtype=index_dtype), + dtype=np.int64, + ) + ax = df.plot() + lines = ax.get_lines()[0] + rs = lines.get_xydata() + rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y") + tm.assert_series_equal(rs, df.y, check_index_type=False) + + @pytest.mark.parametrize( + "df", + [ + DataFrame({"y": [0.0, 1.0, 2.0, 3.0]}, index=[1.0, 0.0, 3.0, 2.0]), + DataFrame( + {"y": [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0]}, + index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0], + ), + ], + ) + def test_unsorted_index_lims(self, df): + ax = df.plot() + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= np.nanmin(lines[0].get_data()[0]) + assert xmax >= np.nanmax(lines[0].get_data()[0]) + + def test_unsorted_index_lims_x_y(self): + df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0], "z": [91.0, 90.0, 93.0, 92.0]}) + ax = df.plot(x="z", y="y") + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= np.nanmin(lines[0].get_data()[0]) + assert xmax >= np.nanmax(lines[0].get_data()[0]) + + def 
test_negative_log(self): + df = -DataFrame( + np.random.default_rng(2).random((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + msg = "Log-y scales are not supported in area plot" + with pytest.raises(ValueError, match=msg): + df.plot.area(logy=True) + with pytest.raises(ValueError, match=msg): + df.plot.area(loglog=True) + + def _compare_stacked_y_cood(self, normal_lines, stacked_lines): + base = np.zeros(len(normal_lines[0].get_data()[1])) + for nl, sl in zip(normal_lines, stacked_lines): + base += nl.get_data()[1] # get y coordinates + sy = sl.get_data()[1] + tm.assert_numpy_array_equal(base, sy) + + @pytest.mark.parametrize("kind", ["line", "area"]) + @pytest.mark.parametrize("mult", [1, -1]) + def test_line_area_stacked(self, kind, mult): + df = mult * DataFrame( + np.random.default_rng(2).random((6, 4)), columns=["w", "x", "y", "z"] + ) + + ax1 = _check_plot_works(df.plot, kind=kind, stacked=False) + ax2 = _check_plot_works(df.plot, kind=kind, stacked=True) + self._compare_stacked_y_cood(ax1.lines, ax2.lines) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_line_area_stacked_sep_df(self, kind): + # each column has either positive or negative value + sep_df = DataFrame( + { + "w": np.random.default_rng(2).random(6), + "x": np.random.default_rng(2).random(6), + "y": -np.random.default_rng(2).random(6), + "z": -np.random.default_rng(2).random(6), + } + ) + ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False) + ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True) + self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2]) + self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:]) + + def test_line_area_stacked_mixed(self): + mixed_df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["w", "x", "y", "z"], + ) + _check_plot_works(mixed_df.plot, stacked=False) + + msg = ( + "When stacked is True, each column must be either all positive or " + "all negative. 
Column 'w' contains both positive and negative " + "values" + ) + with pytest.raises(ValueError, match=msg): + mixed_df.plot(stacked=True) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_line_area_stacked_positive_idx(self, kind): + df = DataFrame( + np.random.default_rng(2).random((6, 4)), columns=["w", "x", "y", "z"] + ) + # Use an index with strictly positive values, preventing + # matplotlib from warning about ignoring xlim + df2 = df.set_index(df.index + 1) + _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True) + + @pytest.mark.parametrize( + "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)] + ) + def test_line_area_nan_df(self, idx): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({"a": values1, "b": values2}, index=idx) + + ax = _check_plot_works(df.plot) + masked1 = ax.lines[0].get_ydata() + masked2 = ax.lines[1].get_ydata() + # remove nan for comparison purpose + + exp = np.array([1, 2, 3], dtype=np.float64) + tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp) + + exp = np.array([3, 2, 1], dtype=np.float64) + tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp) + tm.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False])) + tm.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False])) + + @pytest.mark.parametrize( + "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)] + ) + def test_line_area_nan_df_stacked(self, idx): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({"a": values1, "b": values2}, index=idx) + + expected1 = np.array([1, 2, 0, 3], dtype=np.float64) + expected2 = np.array([3, 0, 2, 1], dtype=np.float64) + + ax = _check_plot_works(df.plot, stacked=True) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2) + + @pytest.mark.parametrize( + "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)] + ) + @pytest.mark.parametrize("kwargs", [{}, {"stacked": False}]) + def test_line_area_nan_df_stacked_area(self, idx, kwargs): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({"a": values1, "b": values2}, index=idx) + + expected1 = np.array([1, 2, 0, 3], dtype=np.float64) + expected2 = np.array([3, 0, 2, 1], dtype=np.float64) + + ax = _check_plot_works(df.plot.area, **kwargs) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + if kwargs: + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) + else: + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2) + + ax = _check_plot_works(df.plot.area, stacked=False) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) + + @pytest.mark.parametrize("kwargs", [{}, {"secondary_y": True}]) + def test_line_lim(self, kwargs): + df = DataFrame(np.random.default_rng(2).random((6, 3)), columns=["x", "y", "z"]) + ax = df.plot(**kwargs) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data()[0][0] + assert xmax >= lines[0].get_data()[0][-1] + + def test_line_lim_subplots(self): + df = DataFrame(np.random.default_rng(2).random((6, 3)), columns=["x", "y", "z"]) + axes = df.plot(secondary_y=True, subplots=True) + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + for ax in axes: + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + xmin, xmax = ax.get_xlim() + lines = 
ax.get_lines() + assert xmin <= lines[0].get_data()[0][0] + assert xmax >= lines[0].get_data()[0][-1] + + @pytest.mark.xfail( + strict=False, + reason="2020-12-01 this has been failing periodically on the " + "ymin==0 assertion for a week or so.", + ) + @pytest.mark.parametrize("stacked", [True, False]) + def test_area_lim(self, stacked): + df = DataFrame( + np.random.default_rng(2).random((6, 4)), columns=["x", "y", "z", "four"] + ) + + neg_df = -df + + ax = _check_plot_works(df.plot.area, stacked=stacked) + xmin, xmax = ax.get_xlim() + ymin, ymax = ax.get_ylim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data()[0][0] + assert xmax >= lines[0].get_data()[0][-1] + assert ymin == 0 + + ax = _check_plot_works(neg_df.plot.area, stacked=stacked) + ymin, ymax = ax.get_ylim() + assert ymax == 0 + + def test_area_sharey_dont_overwrite(self): + # GH37942 + df = DataFrame(np.random.default_rng(2).random((4, 2)), columns=["x", "y"]) + fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2, sharey=True) + + df.plot(ax=ax1, kind="area") + df.plot(ax=ax2, kind="area") + + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + + @pytest.mark.parametrize("stacked", [True, False]) + def test_bar_linewidth(self, stacked): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + ax = df.plot.bar(stacked=stacked, linewidth=2) + for r in ax.patches: + assert r.get_linewidth() == 2 + + def test_bar_linewidth_subplots(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # subplots + axes = df.plot.bar(linewidth=2, subplots=True) + _check_axes_shape(axes, axes_num=5, layout=(5, 1)) + for ax in axes: + for r in ax.patches: + assert r.get_linewidth() == 2 + + @pytest.mark.parametrize( + "meth, dim", [("bar", "get_width"), ("barh", "get_height")] + ) + @pytest.mark.parametrize("stacked", [True, False]) + def test_bar_barwidth(self, meth, dim, stacked): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + width = 0.9 + + ax = getattr(df.plot, meth)(stacked=stacked, width=width) + for r in ax.patches: + if not stacked: + assert getattr(r, dim)() == width / len(df.columns) + else: + assert getattr(r, dim)() == width + + @pytest.mark.parametrize( + "meth, dim", [("bar", "get_width"), ("barh", "get_height")] + ) + def test_barh_barwidth_subplots(self, meth, dim): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + width = 0.9 + + axes = getattr(df.plot, meth)(width=width, subplots=True) + for ax in axes: + for r in ax.patches: + assert getattr(r, dim)() == width + + def test_bar_bottom_left_bottom(self): + df = DataFrame(np.random.default_rng(2).random((5, 5))) + ax = df.plot.bar(stacked=False, bottom=1) + result = [p.get_y() for p in ax.patches] + assert result == [1] * 25 + + ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5]) + result = [p.get_y() for p in ax.patches[:5]] + assert result == [-1, -2, -3, -4, -5] + + def test_bar_bottom_left_left(self): + df = DataFrame(np.random.default_rng(2).random((5, 5))) + ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1])) + result = [p.get_x() for p in ax.patches] + assert result == [1] * 25 + + ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5]) + result = [p.get_x() for p in ax.patches[:5]] + assert result == [1, 2, 3, 4, 5] + + def test_bar_bottom_left_subplots(self): + df = DataFrame(np.random.default_rng(2).random((5, 5))) + axes = df.plot.bar(subplots=True, bottom=-1) + for ax in axes: + result = [p.get_y() for p in ax.patches] + assert result == 
[-1] * 5 + + axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1])) + for ax in axes: + result = [p.get_x() for p in ax.patches] + assert result == [1] * 5 + + def test_bar_nan(self): + df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]}) + ax = df.plot.bar() + expected = [10, 0, 20, 5, 10, 20, 1, 2, 3] + result = [p.get_height() for p in ax.patches] + assert result == expected + + def test_bar_nan_stacked(self): + df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]}) + ax = df.plot.bar(stacked=True) + expected = [10, 0, 20, 5, 10, 20, 1, 2, 3] + result = [p.get_height() for p in ax.patches] + assert result == expected + + result = [p.get_y() for p in ax.patches] + expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0] + assert result == expected + + @pytest.mark.parametrize("idx", [Index, pd.CategoricalIndex]) + def test_bar_categorical(self, idx): + # GH 13019 + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 5)), + index=idx(list("ABCDEF")), + columns=idx(list("abcde")), + ) + + ax = df.plot.bar() + ticks = ax.xaxis.get_ticklocs() + tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5])) + assert ax.get_xlim() == (-0.5, 5.5) + # check left-edge of bars + assert ax.patches[0].get_x() == -0.25 + assert ax.patches[-1].get_x() == 5.15 + + ax = df.plot.bar(stacked=True) + tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5])) + assert ax.get_xlim() == (-0.5, 5.5) + assert ax.patches[0].get_x() == -0.25 + assert ax.patches[-1].get_x() == 4.75 + + @pytest.mark.parametrize("x, y", [("x", "y"), (1, 2)]) + def test_plot_scatter(self, x, y): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + def test_plot_scatter_error(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + msg = re.escape("scatter() missing 1 required positional argument: 'y'") + with pytest.raises(TypeError, match=msg): + df.plot.scatter(x="x") + msg = re.escape("scatter() missing 1 required positional argument: 'x'") + with pytest.raises(TypeError, match=msg): + df.plot.scatter(y="y") + + def test_plot_scatter_shape(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + # GH 6951 + axes = df.plot(x="x", y="y", kind="scatter", subplots=True) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + def test_raise_error_on_datetime_time_data(self): + # GH 8113, datetime.time type is not supported by matplotlib in scatter + df = DataFrame(np.random.default_rng(2).standard_normal(10), columns=["a"]) + df["dtime"] = date_range(start="2014-01-01", freq="h", periods=10).time + msg = "must be a string or a (real )?number, not 'datetime.time'" + + with pytest.raises(TypeError, match=msg): + df.plot(kind="scatter", x="dtime", y="a") + + @pytest.mark.parametrize("x, y", [("dates", "vals"), (0, 1)]) + def test_scatterplot_datetime_data(self, x, y): + # GH 30391 + dates = date_range(start=date(2019, 1, 1), periods=12, freq="W") + vals = np.random.default_rng(2).normal(0, 1, len(dates)) + df = DataFrame({"dates": dates, "vals": vals}) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + 
@pytest.mark.parametrize("x, y", [("a", "b"), (0, 1)]) + @pytest.mark.parametrize("b_col", [[2, 3, 4], ["a", "b", "c"]]) + def test_scatterplot_object_data(self, b_col, x, y, infer_string): + # GH 18755 + with option_context("future.infer_string", infer_string): + df = DataFrame({"a": ["A", "B", "C"], "b": b_col}) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + @pytest.mark.parametrize("ordered", [True, False]) + @pytest.mark.parametrize( + "categories", + (["setosa", "versicolor", "virginica"], ["versicolor", "virginica", "setosa"]), + ) + def test_scatterplot_color_by_categorical(self, ordered, categories): + df = DataFrame( + [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]], + columns=["length", "width"], + ) + df["species"] = pd.Categorical( + ["setosa", "setosa", "virginica", "virginica", "versicolor"], + ordered=ordered, + categories=categories, + ) + ax = df.plot.scatter(x=0, y=1, c="species") + (colorbar_collection,) = ax.collections + colorbar = colorbar_collection.colorbar + + expected_ticks = np.array([0.5, 1.5, 2.5]) + result_ticks = colorbar.get_ticks() + tm.assert_numpy_array_equal(result_ticks, expected_ticks) + + expected_boundaries = np.array([0.0, 1.0, 2.0, 3.0]) + result_boundaries = colorbar._boundaries + tm.assert_numpy_array_equal(result_boundaries, expected_boundaries) + + expected_yticklabels = categories + result_yticklabels = [i.get_text() for i in colorbar.ax.get_ymajorticklabels()] + assert all(i == j for i, j in zip(result_yticklabels, expected_yticklabels)) + + @pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")]) + def test_plot_scatter_with_categorical_data(self, x, y): + # after fixing GH 18755, should be able to plot categorical data + df = DataFrame({"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])}) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + @pytest.mark.parametrize("x, y, c", [("x", "y", "z"), (0, 1, 2)]) + def test_plot_scatter_with_c(self, x, y, c): + df = DataFrame( + np.random.default_rng(2).integers(low=0, high=100, size=(6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + + ax = df.plot.scatter(x=x, y=y, c=c) + # default to Greys + assert ax.collections[0].cmap.name == "Greys" + + assert ax.collections[0].colorbar.ax.get_ylabel() == "z" + + def test_plot_scatter_with_c_props(self): + df = DataFrame( + np.random.default_rng(2).integers(low=0, high=100, size=(6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + cm = "cubehelix" + ax = df.plot.scatter(x="x", y="y", c="z", colormap=cm) + assert ax.collections[0].cmap.name == cm + + # verify turning off colorbar works + ax = df.plot.scatter(x="x", y="y", c="z", colorbar=False) + assert ax.collections[0].colorbar is None + + # verify that we can still plot a solid color + ax = df.plot.scatter(x=0, y=1, c="red") + assert ax.collections[0].colorbar is None + _check_colors(ax.collections, facecolors=["r"]) + + def test_plot_scatter_with_c_array(self): + # Ensure that we can pass an np.array straight through to matplotlib, + # this functionality was accidentally removed previously. 
+ # See https://github.com/pandas-dev/pandas/issues/8852 for bug report + # + # Exercise colormap path and non-colormap path as they are independent + # + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + red_rgba = [1.0, 0.0, 0.0, 1.0] + green_rgba = [0.0, 1.0, 0.0, 1.0] + rgba_array = np.array([red_rgba, green_rgba]) + ax = df.plot.scatter(x="A", y="B", c=rgba_array) + # expect the face colors of the points in the non-colormap path to be + # identical to the values we supplied, normally we'd be on shaky ground + # comparing floats for equality but here we expect them to be + # identical. + tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(), rgba_array) + # we don't test the colors of the faces in this next plot because they + # are dependent on the spring colormap, which may change its colors + # later. + float_array = np.array([0.0, 1.0]) + df.plot.scatter(x="A", y="B", c=float_array, cmap="spring") + + def test_plot_scatter_with_s(self): + # this refers to GH 32904 + df = DataFrame( + np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"] + ) + + ax = df.plot.scatter(x="a", y="b", s="c") + tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes()) + + def test_plot_scatter_with_norm(self): + # added while fixing GH 45809 + df = DataFrame( + np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"] + ) + norm = mpl.colors.LogNorm() + ax = df.plot.scatter(x="a", y="b", c="c", norm=norm) + assert ax.collections[0].norm is norm + + def test_plot_scatter_without_norm(self): + # added while fixing GH 45809 + df = DataFrame( + np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"] + ) + ax = df.plot.scatter(x="a", y="b", c="c") + plot_norm = ax.collections[0].norm + color_min_max = (df.c.min(), df.c.max()) + default_norm = mpl.colors.Normalize(*color_min_max) + for value in df.c: + assert plot_norm(value) == default_norm(value) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", + [ + {}, + {"legend": False}, + {"default_axes": True, "subplots": True}, + {"stacked": True}, + ], + ) + def test_plot_bar(self, kwargs): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + + _check_plot_works(df.plot.bar, **kwargs) + + @pytest.mark.slow + def test_plot_bar_int_col(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 15)), + index=list(string.ascii_letters[:10]), + columns=range(15), + ) + _check_plot_works(df.plot.bar) + + @pytest.mark.slow + def test_plot_bar_ticks(self): + df = DataFrame({"a": [0, 1], "b": [1, 0]}) + ax = _check_plot_works(df.plot.bar) + _check_ticks_props(ax, xrot=90) + + ax = df.plot.bar(rot=35, fontsize=10) + _check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10) + + @pytest.mark.slow + def test_plot_barh_ticks(self): + df = DataFrame({"a": [0, 1], "b": [1, 0]}) + ax = _check_plot_works(df.plot.barh) + _check_ticks_props(ax, yrot=0) + + ax = df.plot.barh(rot=55, fontsize=11) + _check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11) + + def test_boxplot(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + + ax = _check_plot_works(df.plot.box) + _check_text_labels(ax.get_xticklabels(), labels) + tm.assert_numpy_array_equal( + ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1) + ) + assert len(ax.lines) == 7 * len(numeric_cols) + + def test_boxplot_series(self, hist_df): 
+ df = hist_df + series = df["height"] + axes = series.plot.box(rot=40) + _check_ticks_props(axes, xrot=40, yrot=0) + + _check_plot_works(series.plot.box) + + def test_boxplot_series_positions(self, hist_df): + df = hist_df + positions = np.array([1, 6, 7]) + ax = df.plot.box(positions=positions) + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + _check_text_labels(ax.get_xticklabels(), labels) + tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions) + assert len(ax.lines) == 7 * len(numeric_cols) + + def test_boxplot_vertical(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + + # if horizontal, yticklabels are rotated + ax = df.plot.box(rot=50, fontsize=8, vert=False) + _check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8) + _check_text_labels(ax.get_yticklabels(), labels) + assert len(ax.lines) == 7 * len(numeric_cols) + + @pytest.mark.filterwarnings("ignore:Attempt:UserWarning") + def test_boxplot_vertical_subplots(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + axes = _check_plot_works( + df.plot.box, + default_axes=True, + subplots=True, + vert=False, + logx=True, + ) + _check_axes_shape(axes, axes_num=3, layout=(1, 3)) + _check_ax_scales(axes, xaxis="log") + for ax, label in zip(axes, labels): + _check_text_labels(ax.get_yticklabels(), [label]) + assert len(ax.lines) == 7 + + def test_boxplot_vertical_positions(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + positions = np.array([3, 2, 8]) + ax = df.plot.box(positions=positions, vert=False) + _check_text_labels(ax.get_yticklabels(), labels) + tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions) + assert len(ax.lines) == 7 * len(numeric_cols) + + def test_boxplot_return_type_invalid(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + msg = "return_type must be {None, 'axes', 'dict', 'both'}" + with pytest.raises(ValueError, match=msg): + df.plot.box(return_type="not_a_type") + + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_boxplot_return_type_invalid_type(self, return_type): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + result = df.plot.box(return_type=return_type) + _check_box_return_type(result, return_type) + + def test_kde_df(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))) + ax = _check_plot_works(df.plot, kind="kde") + expected = [pprint_thing(c) for c in df.columns] + _check_legend_labels(ax, labels=expected) + _check_ticks_props(ax, xrot=0) + + def test_kde_df_rot(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + ax = df.plot(kind="kde", rot=20, fontsize=5) + _check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5) + + def test_kde_df_subplots(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + axes = _check_plot_works( + df.plot, + default_axes=True, + kind="kde", + subplots=True, + ) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + + def test_kde_df_logy(self): + 
pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + axes = df.plot(kind="kde", logy=True, subplots=True) + _check_ax_scales(axes, yaxis="log") + + def test_kde_missing_vals(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).uniform(size=(100, 4))) + df.loc[0, 0] = np.nan + _check_plot_works(df.plot, kind="kde") + + def test_hist_df(self): + df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))) + + ax = _check_plot_works(df.plot.hist) + expected = [pprint_thing(c) for c in df.columns] + _check_legend_labels(ax, labels=expected) + + axes = _check_plot_works( + df.plot.hist, + default_axes=True, + subplots=True, + logy=True, + ) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + _check_ax_scales(axes, yaxis="log") + + def test_hist_df_series(self): + series = Series(np.random.default_rng(2).random(10)) + axes = series.plot.hist(rot=40) + _check_ticks_props(axes, xrot=40, yrot=0) + + def test_hist_df_series_cumulative_density(self): + from matplotlib.patches import Rectangle + + series = Series(np.random.default_rng(2).random(10)) + ax = series.plot.hist(cumulative=True, bins=4, density=True) + # height of last bin (index 5) must be 1.0 + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + tm.assert_almost_equal(rects[-1].get_height(), 1.0) + + def test_hist_df_series_cumulative(self): + from matplotlib.patches import Rectangle + + series = Series(np.random.default_rng(2).random(10)) + ax = series.plot.hist(cumulative=True, bins=4) + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + + tm.assert_almost_equal(rects[-2].get_height(), 10.0) + + def test_hist_df_orientation(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + # if horizontal, yticklabels are rotated + axes = df.plot.hist(rot=50, fontsize=8, orientation="horizontal") + _check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8) + + @pytest.mark.parametrize( + "weights", [0.1 * np.ones(shape=(100,)), 0.1 * np.ones(shape=(100, 2))] + ) + def test_hist_weights(self, weights): + # GH 33173 + + df = DataFrame( + dict(zip(["A", "B"], np.random.default_rng(2).standard_normal((2, 100)))) + ) + + ax1 = _check_plot_works(df.plot, kind="hist", weights=weights) + ax2 = _check_plot_works(df.plot, kind="hist") + + patch_height_with_weights = [patch.get_height() for patch in ax1.patches] + + # original heights with no weights, and we manually multiply with example + # weights, so after multiplication, they should be almost same + expected_patch_height = [0.1 * patch.get_height() for patch in ax2.patches] + + tm.assert_almost_equal(patch_height_with_weights, expected_patch_height) + + def _check_box_coord( + self, + patches, + expected_y=None, + expected_h=None, + expected_x=None, + expected_w=None, + ): + result_y = np.array([p.get_y() for p in patches]) + result_height = np.array([p.get_height() for p in patches]) + result_x = np.array([p.get_x() for p in patches]) + result_width = np.array([p.get_width() for p in patches]) + # dtype is depending on above values, no need to check + + if expected_y is not None: + tm.assert_numpy_array_equal(result_y, expected_y, check_dtype=False) + if expected_h is not None: + tm.assert_numpy_array_equal(result_height, expected_h, check_dtype=False) + if expected_x is not None: + tm.assert_numpy_array_equal(result_x, expected_x, check_dtype=False) + if expected_w is not None: + tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False) + + 
@pytest.mark.parametrize( + "data", + [ + { + "A": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])), + "B": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([8, 8, 8, 8, 8])), + "C": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])), + }, + { + "A": np.repeat( + np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6]) + ), + "B": np.repeat( + np.array([1, np.nan, 2, 3, 4, 5]), np.array([8, 3, 8, 8, 8, 8]) + ), + "C": np.repeat( + np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10]) + ), + }, + ], + ) + def test_hist_df_coord(self, data): + df = DataFrame(data) + + ax = df.plot.hist(bins=5) + self._check_box_coord( + ax.patches[:5], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([6, 7, 8, 9, 10]), + ) + + ax = df.plot.hist(bins=5, stacked=True) + self._check_box_coord( + ax.patches[:5], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_y=np.array([10, 9, 8, 7, 6]), + expected_h=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_y=np.array([18, 17, 16, 15, 14]), + expected_h=np.array([6, 7, 8, 9, 10]), + ) + + axes = df.plot.hist(bins=5, stacked=True, subplots=True) + self._check_box_coord( + axes[0].patches, + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + axes[1].patches, + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + axes[2].patches, + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([6, 7, 8, 9, 10]), + ) + + # horizontal + ax = df.plot.hist(bins=5, orientation="horizontal") + self._check_box_coord( + ax.patches[:5], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10]), + ) + + ax = df.plot.hist(bins=5, stacked=True, orientation="horizontal") + self._check_box_coord( + ax.patches[:5], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_x=np.array([10, 9, 8, 7, 6]), + expected_w=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_x=np.array([18, 17, 16, 15, 14]), + expected_w=np.array([6, 7, 8, 9, 10]), + ) + + axes = df.plot.hist( + bins=5, stacked=True, subplots=True, orientation="horizontal" + ) + self._check_box_coord( + axes[0].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + axes[1].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + axes[2].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10]), + ) + + def test_plot_int_columns(self): + df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))).cumsum() + _check_plot_works(df.plot, legend=True) + + @pytest.mark.parametrize( + "markers", + [ + {0: "^", 1: "+", 2: "o"}, + {0: "^", 
1: "+"}, + ["^", "+", "o"], + ["^", "+"], + ], + ) + def test_style_by_column(self, markers): + import matplotlib.pyplot as plt + + fig = plt.gcf() + fig.clf() + fig.add_subplot(111) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 3))) + ax = df.plot(style=markers) + for idx, line in enumerate(ax.get_lines()[: len(markers)]): + assert line.get_marker() == markers[idx] + + def test_line_label_none(self): + s = Series([1, 2]) + ax = s.plot() + assert ax.get_legend() is None + + ax = s.plot(legend=True) + assert ax.get_legend().get_texts()[0].get_text() == "" + + @pytest.mark.parametrize( + "props, expected", + [ + ("boxprops", "boxes"), + ("whiskerprops", "whiskers"), + ("capprops", "caps"), + ("medianprops", "medians"), + ], + ) + def test_specified_props_kwd_plot_box(self, props, expected): + # GH 30346 + df = DataFrame({k: np.random.default_rng(2).random(100) for k in "ABC"}) + kwd = {props: {"color": "C1"}} + result = df.plot.box(return_type="dict", **kwd) + + assert result[expected][0].get_color() == "C1" + + def test_unordered_ts(self): + # GH#2609, GH#55906 + index = [date(2012, 10, 1), date(2012, 9, 1), date(2012, 8, 1)] + values = [3.0, 2.0, 1.0] + df = DataFrame( + np.array(values), + index=index, + columns=["test"], + ) + ax = df.plot() + xticks = ax.lines[0].get_xdata() + tm.assert_numpy_array_equal(xticks, np.array(index, dtype=object)) + ydata = ax.lines[0].get_ydata() + tm.assert_numpy_array_equal(ydata, np.array(values)) + + # even though we don't sort the data before passing it to matplotlib, + # the ticks are sorted + xticks = ax.xaxis.get_ticklabels() + xlocs = [x.get_position()[0] for x in xticks] + assert Index(xlocs).is_monotonic_increasing + xlabels = [x.get_text() for x in xticks] + assert pd.to_datetime(xlabels, format="%Y-%m-%d").is_monotonic_increasing + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_kind_both_ways(self, kind): + pytest.importorskip("scipy") + df = DataFrame({"x": [1, 2, 3]}) + df.plot(kind=kind) + getattr(df.plot, kind)() + + @pytest.mark.parametrize("kind", ["scatter", "hexbin"]) + def test_kind_both_ways_x_y(self, kind): + pytest.importorskip("scipy") + df = DataFrame({"x": [1, 2, 3]}) + df.plot("x", "x", kind=kind) + getattr(df.plot, kind)("x", "x") + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_all_invalid_plot_data(self, kind): + df = DataFrame(list("abcd")) + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df.plot(kind=kind) + + @pytest.mark.parametrize( + "kind", list(plotting.PlotAccessor._common_kinds) + ["area"] + ) + def test_partially_invalid_plot_data_numeric(self, kind): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + dtype=object, + ) + df[np.random.default_rng(2).random(df.shape[0]) > 0.5] = "a" + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df.plot(kind=kind) + + def test_invalid_kind(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + msg = "invalid_plot_kind is not a valid plot kind" + with pytest.raises(ValueError, match=msg): + df.plot(kind="invalid_plot_kind") + + @pytest.mark.parametrize( + "x,y,lbl", + [ + (["B", "C"], "A", "a"), + (["A"], ["B", "C"], ["b", "c"]), + ], + ) + def test_invalid_xy_args(self, x, y, lbl): + # GH 18671, 19699 allows y to be list-like but not x + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + with pytest.raises(ValueError, match="x must be a label or position"): + df.plot(x=x, y=y, 
label=lbl) + + def test_bad_label(self): + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + msg = "label should be list-like and same length as y" + with pytest.raises(ValueError, match=msg): + df.plot(x="A", y=["B", "C"], label="bad_label") + + @pytest.mark.parametrize("x,y", [("A", "B"), (["A"], "B")]) + def test_invalid_xy_args_dup_cols(self, x, y): + # GH 18671, 19699 allows y to be list-like but not x + df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list("AAB")) + with pytest.raises(ValueError, match="x must be a label or position"): + df.plot(x=x, y=y) + + @pytest.mark.parametrize( + "x,y,lbl,colors", + [ + ("A", ["B"], ["b"], ["red"]), + ("A", ["B", "C"], ["b", "c"], ["red", "blue"]), + (0, [1, 2], ["bokeh", "cython"], ["green", "yellow"]), + ], + ) + def test_y_listlike(self, x, y, lbl, colors): + # GH 19699: tests list-like y and verifies lbls & colors + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + _check_plot_works(df.plot, x="A", y=y, label=lbl) + + ax = df.plot(x=x, y=y, label=lbl, color=colors) + assert len(ax.lines) == len(y) + _check_colors(ax.get_lines(), linecolors=colors) + + @pytest.mark.parametrize("x,y,colnames", [(0, 1, ["A", "B"]), (1, 0, [0, 1])]) + def test_xy_args_integer(self, x, y, colnames): + # GH 20056: tests integer args for xy and checks col names + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + df.columns = colnames + _check_plot_works(df.plot, x=x, y=y) + + def test_hexbin_basic(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + + ax = df.plot.hexbin(x="A", y="B", gridsize=10) + # TODO: need better way to test. This just does existence. + assert len(ax.collections) == 1 + + def test_hexbin_basic_subplots(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + # GH 6951 + axes = df.plot.hexbin(x="A", y="B", subplots=True) + # hexbin should have 2 axes in the figure, 1 for plotting and another + # is colorbar + assert len(axes[0].figure.axes) == 2 + # return value is single axes + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.parametrize("reduce_C", [None, np.std]) + def test_hexbin_with_c(self, reduce_C): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + + ax = df.plot.hexbin(x="A", y="B", C="C", reduce_C_function=reduce_C) + assert len(ax.collections) == 1 + + @pytest.mark.parametrize( + "kwargs, expected", + [ + ({}, "BuGn"), # default cmap + ({"colormap": "cubehelix"}, "cubehelix"), + ({"cmap": "YlGn"}, "YlGn"), + ], + ) + def test_hexbin_cmap(self, kwargs, expected): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + ax = df.plot.hexbin(x="A", y="B", **kwargs) + assert ax.collections[0].cmap.name == expected + + def test_pie_df_err(self): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + msg = "pie requires either y column or 'subplots=True'" + with pytest.raises(ValueError, match=msg): + df.plot.pie() + + @pytest.mark.parametrize("y", ["Y", 2]) + 
def test_pie_df(self, y): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + ax = _check_plot_works(df.plot.pie, y=y) + _check_text_labels(ax.texts, df.index) + + def test_pie_df_subplots(self): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + axes = _check_plot_works( + df.plot.pie, + default_axes=True, + subplots=True, + ) + assert len(axes) == len(df.columns) + for ax in axes: + _check_text_labels(ax.texts, df.index) + for ax, ylabel in zip(axes, df.columns): + assert ax.get_ylabel() == ylabel + + def test_pie_df_labels_colors(self): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + labels = ["A", "B", "C", "D", "E"] + color_args = ["r", "g", "b", "c", "m"] + axes = _check_plot_works( + df.plot.pie, + default_axes=True, + subplots=True, + labels=labels, + colors=color_args, + ) + assert len(axes) == len(df.columns) + + for ax in axes: + _check_text_labels(ax.texts, labels) + _check_colors(ax.patches, facecolors=color_args) + + def test_pie_df_nan(self): + df = DataFrame(np.random.default_rng(2).random((4, 4))) + for i in range(4): + df.iloc[i, i] = np.nan + _, axes = mpl.pyplot.subplots(ncols=4) + + # GH 37668 + kwargs = {"normalize": True} + + with tm.assert_produces_warning(None): + df.plot.pie(subplots=True, ax=axes, legend=True, **kwargs) + + base_expected = ["0", "1", "2", "3"] + for i, ax in enumerate(axes): + expected = list(base_expected) # force copy + expected[i] = "" + result = [x.get_text() for x in ax.texts] + assert result == expected + + # legend labels + # NaN's not included in legend with subplots + # see https://github.com/pandas-dev/pandas/issues/8390 + result_labels = [x.get_text() for x in ax.get_legend().get_texts()] + expected_labels = base_expected[:i] + base_expected[i + 1 :] + assert result_labels == expected_labels + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", + [ + {"logy": True}, + {"logx": True, "logy": True}, + {"loglog": True}, + ], + ) + def test_errorbar_plot(self, kwargs): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4} + df_err = DataFrame(d_err) + + # check line plots + ax = _check_plot_works(df.plot, yerr=df_err, **kwargs) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + def test_errorbar_plot_bar(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4} + df_err = DataFrame(d_err) + ax = _check_plot_works( + (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True + ) + _check_has_errorbars(ax, xerr=2, yerr=2) + + @pytest.mark.slow + def test_errorbar_plot_yerr_array(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + # yerr is raw error values + ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4) + _check_has_errorbars(ax, xerr=0, yerr=1) + + ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("yerr", ["yerr", "誤差"]) + def test_errorbar_plot_column_name(self, yerr): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + df[yerr] = np.ones(12) * 0.2 + + ax = _check_plot_works(df.plot, yerr=yerr) + _check_has_errorbars(ax, xerr=0, yerr=2) + + ax = 
_check_plot_works(df.plot, y="y", x="x", yerr=yerr) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + def test_errorbar_plot_external_valueerror(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + with tm.external_error_raised(ValueError): + df.plot(yerr=np.random.default_rng(2).standard_normal(11)) + + @pytest.mark.slow + def test_errorbar_plot_external_typeerror(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + df_err = DataFrame({"x": ["zzz"] * 12, "y": ["zzz"] * 12}) + with tm.external_error_raised(TypeError): + df.plot(yerr=df_err) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + @pytest.mark.parametrize( + "y_err", + [ + Series(np.ones(12) * 0.2, name="x"), + DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}), + ], + ) + def test_errorbar_plot_different_yerr(self, kind, y_err): + df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)}) + + ax = _check_plot_works(df.plot, yerr=y_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + @pytest.mark.parametrize( + "y_err, x_err", + [ + ( + DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}), + DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}), + ), + (Series(np.ones(12) * 0.2, name="x"), Series(np.ones(12) * 0.2, name="x")), + (0.2, 0.2), + ], + ) + def test_errorbar_plot_different_yerr_xerr(self, kind, y_err, x_err): + df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)}) + ax = _check_plot_works(df.plot, yerr=y_err, xerr=x_err, kind=kind) + _check_has_errorbars(ax, xerr=2, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + def test_errorbar_plot_different_yerr_xerr_subplots(self, kind): + df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)}) + df_err = DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}) + axes = _check_plot_works( + df.plot, + default_axes=True, + yerr=df_err, + xerr=df_err, + subplots=True, + kind=kind, + ) + _check_has_errorbars(axes, xerr=1, yerr=1) + + @pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError) + def test_errorbar_plot_iterator(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + + # yerr is iterator + ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df))) + _check_has_errorbars(ax, xerr=0, yerr=2) + + def test_errorbar_with_integer_column_names(self): + # test with integer column names + df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 2)))) + df_err = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 2)))) + ax = _check_plot_works(df.plot, yerr=df_err) + _check_has_errorbars(ax, xerr=0, yerr=2) + ax = _check_plot_works(df.plot, y=0, yerr=1) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar"]) + def test_errorbar_with_partial_columns_kind(self, kind): + df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 3)))) + df_err = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 2))), columns=[0, 2] + ) + ax = _check_plot_works(df.plot, yerr=df_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + def test_errorbar_with_partial_columns_dti(self): + df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 3)))) + df_err = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 
2))), columns=[0, 2] + ) + ix = date_range("1/1/2000", periods=10, freq="ME") + df.set_index(ix, inplace=True) + df_err.set_index(ix, inplace=True) + ax = _check_plot_works(df.plot, yerr=df_err, kind="line") + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("err_box", [lambda x: x, DataFrame]) + def test_errorbar_with_partial_columns_box(self, err_box): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + err = err_box({"x": np.ones(12) * 0.2, "z": np.ones(12) * 0.4}) + ax = _check_plot_works(df.plot, yerr=err) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + def test_errorbar_timeseries(self, kind): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4} + + # check time-series plots + ix = date_range("1/1/2000", "1/1/2001", freq="ME") + tdf = DataFrame(d, index=ix) + tdf_err = DataFrame(d_err, index=ix) + + ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=1) + + ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=1) + + ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + axes = _check_plot_works( + tdf.plot, + default_axes=True, + kind=kind, + yerr=tdf_err, + subplots=True, + ) + _check_has_errorbars(axes, xerr=0, yerr=1) + + def test_errorbar_asymmetrical(self): + err = np.random.default_rng(2).random((3, 2, 5)) + + # each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]... 
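+ # asymmetrical errors are passed with shape (n_columns, 2, n_points):
+ # err[i, 0, :] are the lower offsets and err[i, 1, :] the upper offsets for
+ # the i-th column, so each error bar runs from y - lower to y + upper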
+ df = DataFrame(np.arange(15).reshape(3, 5)).T + + ax = df.plot(yerr=err, xerr=err / 2) + + yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1] + expected_0_0 = err[0, :, 0] * np.array([-1, 1]) + tm.assert_almost_equal(yerr_0_0, expected_0_0) + + msg = re.escape( + "Asymmetrical error bars should be provided with the shape (3, 2, 5)" + ) + with pytest.raises(ValueError, match=msg): + df.plot(yerr=err.T) + + def test_table(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + _check_plot_works(df.plot, table=True) + _check_plot_works(df.plot, table=df) + + # GH 35945 UserWarning + with tm.assert_produces_warning(None): + ax = df.plot() + assert len(ax.tables) == 0 + plotting.table(ax, df.T) + assert len(ax.tables) == 1 + + def test_errorbar_scatter(self): + df = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((5, 2))), + index=range(5), + columns=["x", "y"], + ) + df_err = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((5, 2))) / 5, + index=range(5), + columns=["x", "y"], + ) + + ax = _check_plot_works(df.plot.scatter, x="x", y="y") + _check_has_errorbars(ax, xerr=0, yerr=0) + ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err) + _check_has_errorbars(ax, xerr=1, yerr=0) + + ax = _check_plot_works(df.plot.scatter, x="x", y="y", yerr=df_err) + _check_has_errorbars(ax, xerr=0, yerr=1) + ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err, yerr=df_err) + _check_has_errorbars(ax, xerr=1, yerr=1) + + def test_errorbar_scatter_color(self): + def _check_errorbar_color(containers, expected, has_err="has_xerr"): + lines = [] + errs = next(c.lines for c in ax.containers if getattr(c, has_err, False)) + for el in errs: + if is_list_like(el): + lines.extend(el) + else: + lines.append(el) + err_lines = [x for x in lines if x in ax.collections] + _check_colors(err_lines, linecolors=np.array([expected] * len(err_lines))) + + # GH 8081 + df = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 5))), + columns=["a", "b", "c", "d", "e"], + ) + ax = df.plot.scatter(x="a", y="b", xerr="d", yerr="e", c="red") + _check_has_errorbars(ax, xerr=1, yerr=1) + _check_errorbar_color(ax.containers, "red", has_err="has_xerr") + _check_errorbar_color(ax.containers, "red", has_err="has_yerr") + + ax = df.plot.scatter(x="a", y="b", yerr="e", color="green") + _check_has_errorbars(ax, xerr=0, yerr=1) + _check_errorbar_color(ax.containers, "green", has_err="has_yerr") + + def test_scatter_unknown_colormap(self): + # GH#48726 + df = DataFrame({"a": [1, 2, 3], "b": 4}) + with pytest.raises((ValueError, KeyError), match="'unknown' is not a"): + df.plot(x="a", y="b", colormap="unknown", kind="scatter") + + def test_sharex_and_ax(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + # the axis in fig.get_axis() are sorted differently than pandas + # expected them, so make sure that only the right ones are removed + import matplotlib.pyplot as plt + + plt.close("all") + gs, axes = _generate_4_axes_via_gridspec() + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + + def _check(axes): + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[0], axes[2]]: + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + for ax in [axes[1], axes[3]]: + 
_check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax, sharex=True) + gs.tight_layout(plt.gcf()) + _check(axes) + plt.close("all") + + gs, axes = _generate_4_axes_via_gridspec() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True) + _check(axes) + + def test_sharex_false_and_ax(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + # the axis in fig.get_axis() are sorted differently than pandas + # expected them, so make sure that only the right ones are removed + import matplotlib.pyplot as plt + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + gs, axes = _generate_4_axes_via_gridspec() + # without sharex, no labels should be touched! + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax) + + gs.tight_layout(plt.gcf()) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + def test_sharey_and_ax(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + # the axis in fig.get_axis() are sorted differently than pandas + # expected them, so make sure that only the right ones are removed + import matplotlib.pyplot as plt + + gs, axes = _generate_4_axes_via_gridspec() + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + + def _check(axes): + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + for ax in [axes[0], axes[1]]: + _check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[2], axes[3]]: + _check_visible(ax.get_yticklabels(), visible=False) + + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax, sharey=True) + gs.tight_layout(plt.gcf()) + _check(axes) + plt.close("all") + + gs, axes = _generate_4_axes_via_gridspec() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharey=True) + + gs.tight_layout(plt.gcf()) + _check(axes) + + def test_sharey_and_ax_tight(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + import matplotlib.pyplot as plt + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + gs, axes = _generate_4_axes_via_gridspec() + # without sharex, no labels should be touched! 
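+ # each gridspec axes is plotted into independently here (no sharex/sharey),
+ # so every axes keeps both its x and y tick labels visible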
+ for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax) + + gs.tight_layout(plt.gcf()) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds) + def test_memory_leak(self, kind): + """Check that every plot type gets properly collected.""" + pytest.importorskip("scipy") + args = {} + if kind in ["hexbin", "scatter", "pie"]: + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + args = {"x": "A", "y": "B"} + elif kind == "area": + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).abs() + else: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + # Use a weakref so we can see if the object gets collected without + # also preventing it from being collected + ref = weakref.ref(df.plot(kind=kind, **args)) + + # have matplotlib delete all the figures + plt.close("all") + # force a garbage collection + gc.collect() + assert ref() is None + + def test_df_gridspec_patterns_vert_horiz(self): + # GH 10819 + from matplotlib import gridspec + import matplotlib.pyplot as plt + + ts = Series( + np.random.default_rng(2).standard_normal(10), + index=date_range("1/1/2000", periods=10), + ) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=ts.index, + columns=list("AB"), + ) + + def _get_vertical_grid(): + gs = gridspec.GridSpec(3, 1) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:2, :]) + ax2 = fig.add_subplot(gs[2, :]) + return ax1, ax2 + + def _get_horizontal_grid(): + gs = gridspec.GridSpec(1, 3) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:, :2]) + ax2 = fig.add_subplot(gs[:, 2]) + return ax1, ax2 + + for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]: + ax1 = ts.plot(ax=ax1) + assert len(ax1.lines) == 1 + ax2 = df.plot(ax=ax2) + assert len(ax2.lines) == 2 + for ax in [ax1, ax2]: + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + # subplots=True + for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]: + axes = df.plot(subplots=True, ax=[ax1, ax2]) + assert len(ax1.lines) == 1 + assert len(ax2.lines) == 1 + for ax in axes: + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + # vertical / subplots / sharex=True / sharey=True + ax1, ax2 = _get_vertical_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True) + assert len(axes[0].lines) == 1 + assert len(axes[1].lines) == 1 + for ax in [ax1, ax2]: + # yaxis are visible because there is only one column + _check_visible(ax.get_yticklabels(), visible=True) + # xaxis of axes0 (top) are hidden + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[0].get_xticklabels(minor=True), visible=False) + 
_check_visible(axes[1].get_xticklabels(), visible=True) + _check_visible(axes[1].get_xticklabels(minor=True), visible=True) + plt.close("all") + + # horizontal / subplots / sharex=True / sharey=True + ax1, ax2 = _get_horizontal_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True) + assert len(axes[0].lines) == 1 + assert len(axes[1].lines) == 1 + _check_visible(axes[0].get_yticklabels(), visible=True) + # yaxis of axes1 (right) are hidden + _check_visible(axes[1].get_yticklabels(), visible=False) + for ax in [ax1, ax2]: + # xaxis are visible because there is only one column + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + def test_df_gridspec_patterns_boxed(self): + # GH 10819 + from matplotlib import gridspec + import matplotlib.pyplot as plt + + ts = Series( + np.random.default_rng(2).standard_normal(10), + index=date_range("1/1/2000", periods=10), + ) + + # boxed + def _get_boxed_grid(): + gs = gridspec.GridSpec(3, 3) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:2, :2]) + ax2 = fig.add_subplot(gs[:2, 2]) + ax3 = fig.add_subplot(gs[2, :2]) + ax4 = fig.add_subplot(gs[2, 2]) + return ax1, ax2, ax3, ax4 + + axes = _get_boxed_grid() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=ts.index, + columns=list("ABCD"), + ) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + assert len(ax.lines) == 1 + # axis are visible because these are not shared + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + # subplots / sharex=True / sharey=True + axes = _get_boxed_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True) + for ax in axes: + assert len(ax.lines) == 1 + for ax in [axes[0], axes[2]]: # left column + _check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[1], axes[3]]: # right column + _check_visible(ax.get_yticklabels(), visible=False) + for ax in [axes[0], axes[1]]: # top row + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + for ax in [axes[2], axes[3]]: # bottom row + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + def test_df_grid_settings(self): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + _check_grid_settings( + DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}), + plotting.PlotAccessor._dataframe_kinds, + kws={"x": "a", "y": "b"}, + ) + + def test_plain_axes(self): + # supplied ax itself is a SubplotAxes, but figure contains also + # a plain Axes object (GH11556) + fig, ax = mpl.pyplot.subplots() + fig.add_axes([0.2, 0.2, 0.2, 0.2]) + Series(np.random.default_rng(2).random(10)).plot(ax=ax) + + def test_plain_axes_df(self): + # supplied ax itself is a plain Axes, but because the cmap keyword + # a new ax is created for the colorbar -> also multiples axes (GH11520) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(8), + "b": np.random.default_rng(2).standard_normal(8), + } + ) + fig = mpl.pyplot.figure() + ax = fig.add_axes((0, 0, 1, 1)) + df.plot(kind="scatter", ax=ax, x="a", y="b", c="a", cmap="hsv") + + def test_plain_axes_make_axes_locatable(self): + # other examples + 
fig, ax = mpl.pyplot.subplots() + from mpl_toolkits.axes_grid1 import make_axes_locatable + + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + Series(np.random.default_rng(2).random(10)).plot(ax=ax) + Series(np.random.default_rng(2).random(10)).plot(ax=cax) + + def test_plain_axes_make_inset_axes(self): + fig, ax = mpl.pyplot.subplots() + from mpl_toolkits.axes_grid1.inset_locator import inset_axes + + iax = inset_axes(ax, width="30%", height=1.0, loc=3) + Series(np.random.default_rng(2).random(10)).plot(ax=ax) + Series(np.random.default_rng(2).random(10)).plot(ax=iax) + + @pytest.mark.parametrize("method", ["line", "barh", "bar"]) + def test_secondary_axis_font_size(self, method): + # GH: 12565 + df = ( + DataFrame( + np.random.default_rng(2).standard_normal((15, 2)), columns=list("AB") + ) + .assign(C=lambda df: df.B.cumsum()) + .assign(D=lambda df: df.C * 1.1) + ) + + fontsize = 20 + sy = ["C", "D"] + + kwargs = {"secondary_y": sy, "fontsize": fontsize, "mark_right": True} + ax = getattr(df.plot, method)(**kwargs) + _check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize) + + def test_x_string_values_ticks(self): + # Test if string plot index have a fixed xtick position + # GH: 7612, GH: 22334 + df = DataFrame( + { + "sales": [3, 2, 3], + "visits": [20, 42, 28], + "day": ["Monday", "Tuesday", "Wednesday"], + } + ) + ax = df.plot.area(x="day") + ax.set_xlim(-1, 3) + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + labels_position = dict(zip(xticklabels, ax.get_xticks())) + # Testing if the label stayed at the right position + assert labels_position["Monday"] == 0.0 + assert labels_position["Tuesday"] == 1.0 + assert labels_position["Wednesday"] == 2.0 + + def test_x_multiindex_values_ticks(self): + # Test if multiindex plot index have a fixed xtick position + # GH: 15912 + index = MultiIndex.from_product([[2012, 2013], [1, 2]]) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), + columns=["A", "B"], + index=index, + ) + ax = df.plot() + ax.set_xlim(-1, 4) + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + labels_position = dict(zip(xticklabels, ax.get_xticks())) + # Testing if the label stayed at the right position + assert labels_position["(2012, 1)"] == 0.0 + assert labels_position["(2012, 2)"] == 1.0 + assert labels_position["(2013, 1)"] == 2.0 + assert labels_position["(2013, 2)"] == 3.0 + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_xlim_plot_line(self, kind): + # test if xlim is set correctly in plot.line and plot.area + # GH 27686 + df = DataFrame([2, 4], index=[1, 2]) + ax = df.plot(kind=kind) + xlims = ax.get_xlim() + assert xlims[0] < 1 + assert xlims[1] > 2 + + def test_xlim_plot_line_correctly_in_mixed_plot_type(self): + # test if xlim is set correctly when ax contains multiple different kinds + # of plots, GH 27686 + fig, ax = mpl.pyplot.subplots() + + indexes = ["k1", "k2", "k3", "k4"] + df = DataFrame( + { + "s1": [1000, 2000, 1500, 2000], + "s2": [900, 1400, 2000, 3000], + "s3": [1500, 1500, 1600, 1200], + "secondary_y": [1, 3, 4, 3], + }, + index=indexes, + ) + df[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False) + df[["secondary_y"]].plot(ax=ax, secondary_y=True) + + xlims = ax.get_xlim() + assert xlims[0] < 0 + assert xlims[1] > 3 + + # make sure axis labels are plotted correctly as well + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + assert xticklabels == indexes + + def test_plot_no_rows(self): + # GH 27758 + df = DataFrame(columns=["foo"], 
dtype=int) + assert df.empty + ax = df.plot() + assert len(ax.get_lines()) == 1 + line = ax.get_lines()[0] + assert len(line.get_xdata()) == 0 + assert len(line.get_ydata()) == 0 + + def test_plot_no_numeric_data(self): + df = DataFrame(["a", "b", "c"]) + with pytest.raises(TypeError, match="no numeric data to plot"): + df.plot() + + @pytest.mark.parametrize( + "kind", ("line", "bar", "barh", "hist", "kde", "density", "area", "pie") + ) + def test_group_subplot(self, kind): + pytest.importorskip("scipy") + d = { + "a": np.arange(10), + "b": np.arange(10) + 1, + "c": np.arange(10) + 1, + "d": np.arange(10), + "e": np.arange(10), + } + df = DataFrame(d) + + axes = df.plot(subplots=[("b", "e"), ("c", "d")], kind=kind) + assert len(axes) == 3 # 2 groups + single column a + + expected_labels = (["b", "e"], ["c", "d"], ["a"]) + for ax, labels in zip(axes, expected_labels): + if kind != "pie": + _check_legend_labels(ax, labels=labels) + if kind == "line": + assert len(ax.lines) == len(labels) + + def test_group_subplot_series_notimplemented(self): + ser = Series(range(1)) + msg = "An iterable subplots for a Series" + with pytest.raises(NotImplementedError, match=msg): + ser.plot(subplots=[("a",)]) + + def test_group_subplot_multiindex_notimplemented(self): + df = DataFrame(np.eye(2), columns=MultiIndex.from_tuples([(0, 1), (1, 2)])) + msg = "An iterable subplots for a DataFrame with a MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + df.plot(subplots=[(0, 1)]) + + def test_group_subplot_nonunique_cols_notimplemented(self): + df = DataFrame(np.eye(2), columns=["a", "a"]) + msg = "An iterable subplots for a DataFrame with non-unique" + with pytest.raises(NotImplementedError, match=msg): + df.plot(subplots=[("a",)]) + + @pytest.mark.parametrize( + "subplots, expected_msg", + [ + (123, "subplots should be a bool or an iterable"), + ("a", "each entry should be a list/tuple"), # iterable of non-iterable + ((1,), "each entry should be a list/tuple"), # iterable of non-iterable + (("a",), "each entry should be a list/tuple"), # iterable of strings + ], + ) + def test_group_subplot_bad_input(self, subplots, expected_msg): + # Make sure error is raised when subplots is not a properly + # formatted iterable. Only iterables of iterables are permitted, and + # entries should not be strings. 
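+ # e.g. subplots=[("a", "b")] is accepted, while subplots="a" or subplots=("a",)
+ # are rejected because their entries are not lists/tuples of column labels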
+ d = {"a": np.arange(10), "b": np.arange(10)} + df = DataFrame(d) + + with pytest.raises(ValueError, match=expected_msg): + df.plot(subplots=subplots) + + def test_group_subplot_invalid_column_name(self): + d = {"a": np.arange(10), "b": np.arange(10)} + df = DataFrame(d) + + with pytest.raises(ValueError, match=r"Column label\(s\) \['bad_name'\]"): + df.plot(subplots=[("a", "bad_name")]) + + def test_group_subplot_duplicated_column(self): + d = {"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)} + df = DataFrame(d) + + with pytest.raises(ValueError, match="should be in only one subplot"): + df.plot(subplots=[("a", "b"), ("a", "c")]) + + @pytest.mark.parametrize("kind", ("box", "scatter", "hexbin")) + def test_group_subplot_invalid_kind(self, kind): + d = {"a": np.arange(10), "b": np.arange(10)} + df = DataFrame(d) + with pytest.raises( + ValueError, match="When subplots is an iterable, kind must be one of" + ): + df.plot(subplots=[("a", "b")], kind=kind) + + @pytest.mark.parametrize( + "index_name, old_label, new_label", + [ + (None, "", "new"), + ("old", "old", "new"), + (None, "", ""), + (None, "", 1), + (None, "", [1, 2]), + ], + ) + @pytest.mark.parametrize("kind", ["line", "area", "bar"]) + def test_xlabel_ylabel_dataframe_single_plot( + self, kind, index_name, old_label, new_label + ): + # GH 9093 + df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) + df.index.name = index_name + + # default is the ylabel is not shown and xlabel is index name + ax = df.plot(kind=kind) + assert ax.get_xlabel() == old_label + assert ax.get_ylabel() == "" + + # old xlabel will be overridden and assigned ylabel will be used as ylabel + ax = df.plot(kind=kind, ylabel=new_label, xlabel=new_label) + assert ax.get_ylabel() == str(new_label) + assert ax.get_xlabel() == str(new_label) + + @pytest.mark.parametrize( + "xlabel, ylabel", + [ + (None, None), + ("X Label", None), + (None, "Y Label"), + ("X Label", "Y Label"), + ], + ) + @pytest.mark.parametrize("kind", ["scatter", "hexbin"]) + def test_xlabel_ylabel_dataframe_plane_plot(self, kind, xlabel, ylabel): + # GH 37001 + xcol = "Type A" + ycol = "Type B" + df = DataFrame([[1, 2], [2, 5]], columns=[xcol, ycol]) + + # default is the labels are column names + ax = df.plot(kind=kind, x=xcol, y=ycol, xlabel=xlabel, ylabel=ylabel) + assert ax.get_xlabel() == (xcol if xlabel is None else xlabel) + assert ax.get_ylabel() == (ycol if ylabel is None else ylabel) + + @pytest.mark.parametrize("secondary_y", (False, True)) + def test_secondary_y(self, secondary_y): + ax_df = DataFrame([0]).plot( + secondary_y=secondary_y, ylabel="Y", ylim=(0, 100), yticks=[99] + ) + for ax in ax_df.figure.axes: + if ax.yaxis.get_visible(): + assert ax.get_ylabel() == "Y" + assert ax.get_ylim() == (0, 100) + assert ax.get_yticks()[0] == 99 + + @pytest.mark.slow + def test_plot_no_warning(self): + # GH 55138 + # TODO(3.0): this can be removed once Period[B] deprecation is enforced + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + with tm.assert_produces_warning(False): + _ = df.plot() + _ = df.T.plot() + + +def _generate_4_axes_via_gridspec(): + import matplotlib.pyplot as plt + + gs = mpl.gridspec.GridSpec(2, 2) + ax_tl = plt.subplot(gs[0, 0]) + ax_ll = plt.subplot(gs[1, 0]) + ax_tr = plt.subplot(gs[0, 1]) + ax_lr = plt.subplot(gs[1, 1]) + + return gs, [ax_tl, ax_ll, ax_tr, ax_lr] diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py new file mode 100644 index 0000000000000000000000000000000000000000..ff1edd323ef280cef5e7e79aa809906434a86407 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py @@ -0,0 +1,670 @@ +""" Test cases for DataFrame.plot """ +import re + +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_colors, + _check_plot_works, + _unpack_cycler, +) +from pandas.util.version import Version + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") +cm = pytest.importorskip("matplotlib.cm") + + +def _check_colors_box(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None): + if fliers_c is None: + fliers_c = "k" + _check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"])) + _check_colors(bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])) + _check_colors(bp["medians"], linecolors=[medians_c] * len(bp["medians"])) + _check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"])) + _check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"])) + + +class TestDataFrameColor: + @pytest.mark.parametrize( + "color", ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"] + ) + def test_mpl2_color_cycle_str(self, color): + # GH 15516 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"] + ) + _check_plot_works(df.plot, color=color) + + def test_color_single_series_list(self): + # GH 3486 + df = DataFrame({"A": [1, 2, 3]}) + _check_plot_works(df.plot, color=["red"]) + + @pytest.mark.parametrize("color", [(1, 0, 0), (1, 0, 0, 0.5)]) + def test_rgb_tuple_color(self, color): + # GH 16695 + df = DataFrame({"x": [1, 2], "y": [3, 4]}) + _check_plot_works(df.plot, x="x", y="y", color=color) + + def test_color_empty_string(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + with pytest.raises(ValueError, match="Invalid color argument:"): + df.plot(color="") + + def test_color_and_style_arguments(self): + df = DataFrame({"x": [1, 2], "y": [3, 4]}) + # passing both 'color' and 'style' arguments should be allowed + # if there is no color symbol in the style strings: + ax = df.plot(color=["red", "black"], style=["-", "--"]) + # check that the linestyles are correctly set: + linestyle = [line.get_linestyle() for line in ax.lines] + assert linestyle == ["-", "--"] + # check that the colors are correctly set: + color = [line.get_color() for line in ax.lines] + assert color == ["red", "black"] + # passing both 'color' and 'style' arguments should not be allowed + # if there is a color symbol in the style strings: + msg = ( + "Cannot pass 'style' string with a color symbol and 'color' keyword " + "argument. 
Please use one or the other or pass 'style' without a color " + "symbol" + ) + with pytest.raises(ValueError, match=msg): + df.plot(color=["red", "black"], style=["k-", "r--"]) + + @pytest.mark.parametrize( + "color, expected", + [ + ("green", ["green"] * 4), + (["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]), + ], + ) + def test_color_and_marker(self, color, expected): + # GH 21003 + df = DataFrame(np.random.default_rng(2).random((7, 4))) + ax = df.plot(color=color, style="d--") + # check colors + result = [i.get_color() for i in ax.lines] + assert result == expected + # check markers and linestyles + assert all(i.get_linestyle() == "--" for i in ax.lines) + assert all(i.get_marker() == "d" for i in ax.lines) + + def test_bar_colors(self): + default_colors = _unpack_cycler(plt.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.bar() + _check_colors(ax.patches[::5], facecolors=default_colors[:5]) + + def test_bar_colors_custom(self): + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.bar(color=custom_colors) + _check_colors(ax.patches[::5], facecolors=custom_colors) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_bar_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + ax = df.plot.bar(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)] + _check_colors(ax.patches[::5], facecolors=rgba_colors) + + def test_bar_colors_single_col(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.loc[:, [0]].plot.bar(color="DodgerBlue") + _check_colors([ax.patches[0]], facecolors=["DodgerBlue"]) + + def test_bar_colors_green(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(kind="bar", color="green") + _check_colors(ax.patches[::5], facecolors=["green"] * 5) + + def test_bar_user_colors(self): + df = DataFrame( + {"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]} + ) + # This should *only* work when `y` is specified, else + # we use one color per column + ax = df.plot.bar(y="A", color=df["color"]) + result = [p.get_facecolor() for p in ax.patches] + expected = [ + (1.0, 0.0, 0.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (1.0, 0.0, 0.0, 1.0), + ] + assert result == expected + + def test_if_scatterplot_colorbar_affects_xaxis_visibility(self): + # addressing issue #10611, to ensure colobar does not + # interfere with x-axis label and ticklabels with + # ipython inline backend. + random_array = np.random.default_rng(2).random((10, 3)) + df = DataFrame(random_array, columns=["A label", "B label", "C label"]) + + ax1 = df.plot.scatter(x="A label", y="B label") + ax2 = df.plot.scatter(x="A label", y="B label", c="C label") + + vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()] + vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()] + assert vis1 == vis2 + + vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()] + vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()] + assert vis1 == vis2 + + assert ( + ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible() + ) + + def test_if_hexbin_xaxis_label_is_visible(self): + # addressing issue #10678, to ensure colobar does not + # interfere with x-axis label and ticklabels with + # ipython inline backend. 
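+ # a pandas hexbin plot adds a colorbar axes by default, so verify the parent
+ # axes' x-axis label and its major/minor tick labels all stay visible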
+ random_array = np.random.default_rng(2).random((10, 3)) + df = DataFrame(random_array, columns=["A label", "B label", "C label"]) + + ax = df.plot.hexbin("A label", "B label", gridsize=12) + assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels()) + assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels()) + assert ax.xaxis.get_label().get_visible() + + def test_if_scatterplot_colorbars_are_next_to_parent_axes(self): + random_array = np.random.default_rng(2).random((10, 3)) + df = DataFrame(random_array, columns=["A label", "B label", "C label"]) + + fig, axes = plt.subplots(1, 2) + df.plot.scatter("A label", "B label", c="C label", ax=axes[0]) + df.plot.scatter("A label", "B label", c="C label", ax=axes[1]) + plt.tight_layout() + + points = np.array([ax.get_position().get_points() for ax in fig.axes]) + axes_x_coords = points[:, :, 0] + parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :] + colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :] + assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all() + + @pytest.mark.parametrize("cmap", [None, "Greys"]) + def test_scatter_with_c_column_name_with_colors(self, cmap): + # https://github.com/pandas-dev/pandas/issues/34316 + + df = DataFrame( + [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]], + columns=["length", "width"], + ) + df["species"] = ["r", "r", "g", "g", "b"] + if cmap is not None: + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + ax = df.plot.scatter(x=0, y=1, cmap=cmap, c="species") + else: + ax = df.plot.scatter(x=0, y=1, c="species", cmap=cmap) + assert ax.collections[0].colorbar is None + + def test_scatter_colors(self): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]}) + with pytest.raises(TypeError, match="Specify exactly one of `c` and `color`"): + df.plot.scatter(x="a", y="b", c="c", color="green") + + def test_scatter_colors_not_raising_warnings(self): + # GH-53908. Do not raise UserWarning: No data for colormapping + # provided via 'c'. 
Parameters 'cmap' will be ignored + df = DataFrame({"x": [1, 2, 3], "y": [1, 2, 3]}) + with tm.assert_produces_warning(None): + df.plot.scatter(x="x", y="y", c="b") + + def test_scatter_colors_default(self): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]}) + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + ax = df.plot.scatter(x="a", y="b", c="c") + tm.assert_numpy_array_equal( + ax.collections[0].get_facecolor()[0], + np.array(mpl.colors.ColorConverter.to_rgba(default_colors[0])), + ) + + def test_scatter_colors_white(self): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]}) + ax = df.plot.scatter(x="a", y="b", color="white") + tm.assert_numpy_array_equal( + ax.collections[0].get_facecolor()[0], + np.array([1, 1, 1, 1], dtype=np.float64), + ) + + def test_scatter_colorbar_different_cmap(self): + # GH 33389 + df = DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]}) + df["x2"] = df["x"] + 1 + + _, ax = plt.subplots() + df.plot("x", "y", c="c", kind="scatter", cmap="cividis", ax=ax) + df.plot("x2", "y", c="c", kind="scatter", cmap="magma", ax=ax) + + assert ax.collections[0].cmap.name == "cividis" + assert ax.collections[1].cmap.name == "magma" + + def test_line_colors(self): + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + ax = df.plot(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + + plt.close("all") + + ax2 = df.plot(color=custom_colors) + lines2 = ax2.get_lines() + + for l1, l2 in zip(ax.get_lines(), lines2): + assert l1.get_color() == l2.get_color() + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_line_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + _check_colors(ax.get_lines(), linecolors=rgba_colors) + + def test_line_colors_single_col(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # make color a list if plotting one column frame + # handles cases like df.plot(color='DodgerBlue') + ax = df.loc[:, [0]].plot(color="DodgerBlue") + _check_colors(ax.lines, linecolors=["DodgerBlue"]) + + def test_line_colors_single_color(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(color="red") + _check_colors(ax.get_lines(), linecolors=["red"] * 5) + + def test_line_colors_hex(self): + # GH 10299 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"] + ax = df.plot(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + + def test_dont_modify_colors(self): + colors = ["r", "g", "b"] + DataFrame(np.random.default_rng(2).random((10, 2))).plot(color=colors) + assert len(colors) == 3 + + def test_line_colors_and_styles_subplots(self): + # GH 9894 + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + axes = df.plot(subplots=True) + for ax, c in zip(axes, list(default_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("color", ["k", "green"]) + def test_line_colors_and_styles_subplots_single_color_str(self, color): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + axes = df.plot(subplots=True, color=color) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=[color]) + + @pytest.mark.parametrize("color", 
["rgcby", list("rgcby")]) + def test_line_colors_and_styles_subplots_custom_colors(self, color): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + axes = df.plot(color=color, subplots=True) + for ax, c in zip(axes, list(color)): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_line_colors_and_styles_subplots_colormap_hex(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # GH 10299 + custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"] + axes = df.plot(color=custom_colors, subplots=True) + for ax, c in zip(axes, list(custom_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("cmap", ["jet", cm.jet]) + def test_line_colors_and_styles_subplots_colormap_subplot(self, cmap): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + axes = df.plot(colormap=cmap, subplots=True) + for ax, c in zip(axes, rgba_colors): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_line_colors_and_styles_subplots_single_col(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # make color a list if plotting one column frame + # handles cases like df.plot(color='DodgerBlue') + axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True) + _check_colors(axes[0].lines, linecolors=["DodgerBlue"]) + + def test_line_colors_and_styles_subplots_single_char(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # single character style + axes = df.plot(style="r", subplots=True) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=["r"]) + + def test_line_colors_and_styles_subplots_list_styles(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # list of styles + styles = list("rgcby") + axes = df.plot(style=styles, subplots=True) + for ax, c in zip(axes, styles): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_area_colors(self): + from matplotlib.collections import PolyCollection + + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).random((5, 5))) + + ax = df.plot.area(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + _check_colors(poly, facecolors=custom_colors) + + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, facecolors=custom_colors) + + for h in handles: + assert h.get_alpha() is None + + def test_area_colors_poly(self): + from matplotlib import cm + from matplotlib.collections import PolyCollection + + df = DataFrame(np.random.default_rng(2).random((5, 5))) + ax = df.plot.area(colormap="jet") + jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + _check_colors(ax.get_lines(), linecolors=jet_colors) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + _check_colors(poly, facecolors=jet_colors) + + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, facecolors=jet_colors) + for h in handles: + assert h.get_alpha() is None + + def test_area_colors_stacked_false(self): + from matplotlib import cm + from matplotlib.collections import PolyCollection + + df = DataFrame(np.random.default_rng(2).random((5, 5))) + jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + # When stacked=False, alpha is set to 0.5 + ax = df.plot.area(colormap=cm.jet, stacked=False) + 
_check_colors(ax.get_lines(), linecolors=jet_colors) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors] + _check_colors(poly, facecolors=jet_with_alpha) + + handles, _ = ax.get_legend_handles_labels() + linecolors = jet_with_alpha + _check_colors(handles[: len(jet_colors)], linecolors=linecolors) + for h in handles: + assert h.get_alpha() == 0.5 + + def test_hist_colors(self): + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.hist() + _check_colors(ax.patches[::10], facecolors=default_colors[:5]) + + def test_hist_colors_single_custom(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + custom_colors = "rgcby" + ax = df.plot.hist(color=custom_colors) + _check_colors(ax.patches[::10], facecolors=custom_colors) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_hist_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.hist(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)] + _check_colors(ax.patches[::10], facecolors=rgba_colors) + + def test_hist_colors_single_col(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.loc[:, [0]].plot.hist(color="DodgerBlue") + _check_colors([ax.patches[0]], facecolors=["DodgerBlue"]) + + def test_hist_colors_single_color(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(kind="hist", color="green") + _check_colors(ax.patches[::10], facecolors=["green"] * 5) + + def test_kde_colors(self): + pytest.importorskip("scipy") + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).random((5, 5))) + + ax = df.plot.kde(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_kde_colors_cmap(self, colormap): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.kde(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + _check_colors(ax.get_lines(), linecolors=rgba_colors) + + def test_kde_colors_and_styles_subplots(self): + pytest.importorskip("scipy") + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + axes = df.plot(kind="kde", subplots=True) + for ax, c in zip(axes, list(default_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("colormap", ["k", "red"]) + def test_kde_colors_and_styles_subplots_single_col_str(self, colormap): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + axes = df.plot(kind="kde", color=colormap, subplots=True) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=[colormap]) + + def test_kde_colors_and_styles_subplots_custom_color(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + custom_colors = "rgcby" + axes = df.plot(kind="kde", color=custom_colors, subplots=True) + for ax, c in zip(axes, list(custom_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_kde_colors_and_styles_subplots_cmap(self, colormap): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 
5))) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + axes = df.plot(kind="kde", colormap=colormap, subplots=True) + for ax, c in zip(axes, rgba_colors): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_kde_colors_and_styles_subplots_single_col(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # make color a list if plotting one column frame + # handles cases like df.plot(color='DodgerBlue') + axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True) + _check_colors(axes[0].lines, linecolors=["DodgerBlue"]) + + def test_kde_colors_and_styles_subplots_single_char(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # list of styles + # single character style + axes = df.plot(kind="kde", style="r", subplots=True) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=["r"]) + + def test_kde_colors_and_styles_subplots_list(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # list of styles + styles = list("rgcby") + axes = df.plot(kind="kde", style=styles, subplots=True) + for ax, c in zip(axes, styles): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_boxplot_colors(self): + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + bp = df.plot.box(return_type="dict") + _check_colors_box( + bp, + default_colors[0], + default_colors[0], + default_colors[2], + default_colors[0], + ) + + def test_boxplot_colors_dict_colors(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + dict_colors = { + "boxes": "#572923", + "whiskers": "#982042", + "medians": "#804823", + "caps": "#123456", + } + bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict") + _check_colors_box( + bp, + dict_colors["boxes"], + dict_colors["whiskers"], + dict_colors["medians"], + dict_colors["caps"], + "r", + ) + + def test_boxplot_colors_default_color(self): + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # partial colors + dict_colors = {"whiskers": "c", "medians": "m"} + bp = df.plot.box(color=dict_colors, return_type="dict") + _check_colors_box(bp, default_colors[0], "c", "m", default_colors[0]) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_boxplot_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + bp = df.plot.box(colormap=colormap, return_type="dict") + jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)] + _check_colors_box( + bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0] + ) + + def test_boxplot_colors_single(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # string color is applied to all artists except fliers + bp = df.plot.box(color="DodgerBlue", return_type="dict") + _check_colors_box(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue") + + def test_boxplot_colors_tuple(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # tuple is also applied to all artists except fliers + bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict") + _check_colors_box(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456") + + def test_boxplot_colors_invalid(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + msg = re.escape( + "color dict contains 
invalid key 'xxxx'. The key must be either " + "['boxes', 'whiskers', 'medians', 'caps']" + ) + with pytest.raises(ValueError, match=msg): + # Color contains invalid key results in ValueError + df.plot.box(color={"boxes": "red", "xxxx": "blue"}) + + def test_default_color_cycle(self): + import cycler + + colors = list("rgbk") + plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + ax = df.plot() + + expected = _unpack_cycler(plt.rcParams)[:3] + _check_colors(ax.get_lines(), linecolors=expected) + + def test_no_color_bar(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + ax = df.plot.hexbin(x="A", y="B", colorbar=None) + assert ax.collections[0].colorbar is None + + def test_mixing_cmap_and_colormap_raises(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + msg = "Only specify one of `cmap` and `colormap`" + with pytest.raises(TypeError, match=msg): + df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn") + + def test_passed_bar_colors(self): + color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)] + colormap = mpl.colors.ListedColormap(color_tuples) + barplot = DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap) + assert color_tuples == [c.get_facecolor() for c in barplot.patches] + + def test_rcParams_bar_colors(self): + color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)] + with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", color_tuples)}): + barplot = DataFrame([[1, 2, 3]]).plot(kind="bar") + assert color_tuples == [c.get_facecolor() for c in barplot.patches] + + def test_colors_of_columns_with_same_name(self): + # ISSUE 11136 -> https://github.com/pandas-dev/pandas/issues/11136 + # Creating a DataFrame with duplicate column labels and testing colors of them. 
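+ # the concat below yields two columns named "a"; every legend handle must still match the color of its own line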
+ df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]}) + df1 = DataFrame({"a": [2, 4, 6]}) + df_concat = pd.concat([df, df1], axis=1) + result = df_concat.plot() + legend = result.get_legend() + if Version(mpl.__version__) < Version("3.7"): + handles = legend.legendHandles + else: + handles = legend.legend_handles + for legend, line in zip(handles, result.lines): + assert legend.get_color() == line.get_color() + + def test_invalid_colormap(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 2)), columns=["A", "B"] + ) + msg = "(is not a valid value)|(is not a known colormap)" + with pytest.raises((ValueError, KeyError), match=msg): + df.plot(colormap="invalid_colormap") + + def test_dataframe_none_color(self): + # GH51953 + df = DataFrame([[1, 2, 3]]) + ax = df.plot(color=None) + expected = _unpack_cycler(mpl.pyplot.rcParams)[:3] + _check_colors(ax.get_lines(), linecolors=expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..f1924185a3df1cae2f0df89ec84225cd68f8fa6d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py @@ -0,0 +1,72 @@ +""" Test cases for DataFrame.plot """ + +import pytest + +from pandas import DataFrame +from pandas.tests.plotting.common import _check_visible + +pytest.importorskip("matplotlib") + + +class TestDataFramePlotsGroupby: + def _assert_ytickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + _check_visible(ax.get_yticklabels(), visible=exp) + + def _assert_xtickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + _check_visible(ax.get_xticklabels(), visible=exp) + + @pytest.mark.parametrize( + "kwargs, expected", + [ + # behavior without keyword + ({}, [True, False, True, False]), + # set sharey=True should be identical + ({"sharey": True}, [True, False, True, False]), + # sharey=False, all yticklabels should be visible + ({"sharey": False}, [True, True, True, True]), + ], + ) + def test_groupby_boxplot_sharey(self, kwargs, expected): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharey can now be switched check whether the right + # pair of axes is turned on or off + df = DataFrame( + { + "a": [-1.43, -0.15, -3.70, -1.43, -0.14], + "b": [0.56, 0.84, 0.29, 0.56, 0.85], + "c": [0, 1, 2, 3, 1], + }, + index=[0, 1, 2, 3, 4], + ) + axes = df.groupby("c").boxplot(**kwargs) + self._assert_ytickslabels_visibility(axes, expected) + + @pytest.mark.parametrize( + "kwargs, expected", + [ + # behavior without keyword + ({}, [True, True, True, True]), + # set sharex=False should be identical + ({"sharex": False}, [True, True, True, True]), + # sharex=True, xticklabels should be visible + # only for bottom plots + ({"sharex": True}, [False, False, True, True]), + ], + ) + def test_groupby_boxplot_sharex(self, kwargs, expected): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharex can now be switched check whether the right + # pair of axes is turned on or off + + df = DataFrame( + { + "a": [-1.43, -0.15, -3.70, -1.43, -0.14], + "b": [0.56, 0.84, 0.29, 0.56, 0.85], + "c": [0, 1, 2, 3, 1], + }, + index=[0, 1, 2, 3, 4], + ) + axes = df.groupby("c").boxplot(**kwargs) + self._assert_xtickslabels_visibility(axes, expected) diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py new file mode 100644 index 0000000000000000000000000000000000000000..402a4b9531e5d4857d0d6e9d7cda2c002d0469d4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py @@ -0,0 +1,272 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + date_range, +) +from pandas.tests.plotting.common import ( + _check_legend_labels, + _check_legend_marker, + _check_text_labels, +) +from pandas.util.version import Version + +mpl = pytest.importorskip("matplotlib") + + +class TestFrameLegend: + @pytest.mark.xfail( + reason=( + "Open bug in matplotlib " + "https://github.com/matplotlib/matplotlib/issues/11357" + ) + ) + def test_mixed_yerr(self): + # https://github.com/pandas-dev/pandas/issues/39522 + from matplotlib.collections import LineCollection + from matplotlib.lines import Line2D + + df = DataFrame([{"x": 1, "a": 1, "b": 1}, {"x": 2, "a": 2, "b": 3}]) + + ax = df.plot("x", "a", c="orange", yerr=0.1, label="orange") + df.plot("x", "b", c="blue", yerr=None, ax=ax, label="blue") + + legend = ax.get_legend() + if Version(mpl.__version__) < Version("3.7"): + result_handles = legend.legendHandles + else: + result_handles = legend.legend_handles + + assert isinstance(result_handles[0], LineCollection) + assert isinstance(result_handles[1], Line2D) + + def test_legend_false(self): + # https://github.com/pandas-dev/pandas/issues/40044 + df = DataFrame({"a": [1, 1], "b": [2, 3]}) + df2 = DataFrame({"d": [2.5, 2.5]}) + + ax = df.plot(legend=True, color={"a": "blue", "b": "green"}, secondary_y="b") + df2.plot(legend=True, color={"d": "red"}, ax=ax) + legend = ax.get_legend() + if Version(mpl.__version__) < Version("3.7"): + handles = legend.legendHandles + else: + handles = legend.legend_handles + result = [handle.get_color() for handle in handles] + expected = ["blue", "green", "red"] + assert result == expected + + @pytest.mark.parametrize("kind", ["line", "bar", "barh", "kde", "area", "hist"]) + def test_df_legend_labels(self, kind): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"]) + df2 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"] + ) + df3 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["g", "h", "i"] + ) + df4 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["j", "k", "l"] + ) + + ax = df.plot(kind=kind, legend=True) + _check_legend_labels(ax, labels=df.columns) + + ax = df2.plot(kind=kind, legend=False, ax=ax) + _check_legend_labels(ax, labels=df.columns) + + ax = df3.plot(kind=kind, legend=True, ax=ax) + _check_legend_labels(ax, labels=df.columns.union(df3.columns)) + + ax = df4.plot(kind=kind, legend="reverse", ax=ax) + expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns)) + _check_legend_labels(ax, labels=expected) + + def test_df_legend_labels_secondary_y(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"]) + df2 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"] + ) + df3 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["g", "h", "i"] + ) + # Secondary Y + ax = df.plot(legend=True, secondary_y="b") + _check_legend_labels(ax, labels=["a", 
"b (right)", "c"]) + ax = df2.plot(legend=False, ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c"]) + ax = df3.plot(kind="bar", legend=True, secondary_y="h", ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"]) + + def test_df_legend_labels_time_series(self): + # Time Series + pytest.importorskip("scipy") + ind = date_range("1/1/2014", periods=3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["a", "b", "c"], + index=ind, + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["d", "e", "f"], + index=ind, + ) + df3 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["g", "h", "i"], + index=ind, + ) + ax = df.plot(legend=True, secondary_y="b") + _check_legend_labels(ax, labels=["a", "b (right)", "c"]) + ax = df2.plot(legend=False, ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c"]) + ax = df3.plot(legend=True, ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"]) + + def test_df_legend_labels_time_series_scatter(self): + # Time Series + pytest.importorskip("scipy") + ind = date_range("1/1/2014", periods=3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["a", "b", "c"], + index=ind, + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["d", "e", "f"], + index=ind, + ) + df3 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["g", "h", "i"], + index=ind, + ) + # scatter + ax = df.plot.scatter(x="a", y="b", label="data1") + _check_legend_labels(ax, labels=["data1"]) + ax = df2.plot.scatter(x="d", y="e", legend=False, label="data2", ax=ax) + _check_legend_labels(ax, labels=["data1"]) + ax = df3.plot.scatter(x="g", y="h", label="data3", ax=ax) + _check_legend_labels(ax, labels=["data1", "data3"]) + + def test_df_legend_labels_time_series_no_mutate(self): + pytest.importorskip("scipy") + ind = date_range("1/1/2014", periods=3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["a", "b", "c"], + index=ind, + ) + # ensure label args pass through and + # index name does not mutate + # column names don't mutate + df5 = df.set_index("a") + ax = df5.plot(y="b") + _check_legend_labels(ax, labels=["b"]) + ax = df5.plot(y="b", label="LABEL_b") + _check_legend_labels(ax, labels=["LABEL_b"]) + _check_text_labels(ax.xaxis.get_label(), "a") + ax = df5.plot(y="c", label="LABEL_c", ax=ax) + _check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"]) + assert df5.columns.tolist() == ["b", "c"] + + def test_missing_marker_multi_plots_on_same_ax(self): + # GH 18222 + df = DataFrame(data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"]) + _, ax = mpl.pyplot.subplots(nrows=1, ncols=3) + # Left plot + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0]) + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0]) + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0]) + _check_legend_labels(ax[0], labels=["r", "g", "b"]) + _check_legend_marker(ax[0], expected_markers=["o", "x", "o"]) + # Center plot + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1]) + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1]) + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1]) + _check_legend_labels(ax[1], labels=["b", "r", "g"]) + _check_legend_marker(ax[1], expected_markers=["o", "o", "x"]) + # Right plot + df.plot(x="x", y="g", linewidth=1, 
marker="x", color="g", ax=ax[2]) + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2]) + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2]) + _check_legend_labels(ax[2], labels=["g", "b", "r"]) + _check_legend_marker(ax[2], expected_markers=["x", "o", "o"]) + + def test_legend_name(self): + multi = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + columns=[np.array(["a", "a", "b", "b"]), np.array(["x", "y", "x", "y"])], + ) + multi.columns.names = ["group", "individual"] + + ax = multi.plot() + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "group,individual") + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(legend=True, ax=ax) + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "group,individual") + + df.columns.name = "new" + ax = df.plot(legend=False, ax=ax) + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "group,individual") + + ax = df.plot(legend=True, ax=ax) + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "new") + + @pytest.mark.parametrize( + "kind", + [ + "line", + "bar", + "barh", + pytest.param("kde", marks=td.skip_if_no("scipy")), + "area", + "hist", + ], + ) + def test_no_legend(self, kind): + df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"]) + ax = df.plot(kind=kind, legend=False) + _check_legend_labels(ax, visible=False) + + def test_missing_markers_legend(self): + # 14958 + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 3)), columns=["A", "B", "C"] + ) + ax = df.plot(y=["A"], marker="x", linestyle="solid") + df.plot(y=["B"], marker="o", linestyle="dotted", ax=ax) + df.plot(y=["C"], marker="<", linestyle="dotted", ax=ax) + + _check_legend_labels(ax, labels=["A", "B", "C"]) + _check_legend_marker(ax, expected_markers=["x", "o", "<"]) + + def test_missing_markers_legend_using_style(self): + # 14563 + df = DataFrame( + { + "A": [1, 2, 3, 4, 5, 6], + "B": [2, 4, 1, 3, 2, 4], + "C": [3, 3, 2, 6, 4, 2], + "X": [1, 2, 3, 4, 5, 6], + } + ) + + _, ax = mpl.pyplot.subplots() + for kind in "ABC": + df.plot("X", kind, label=kind, ax=ax, style=".") + + _check_legend_labels(ax, labels=["A", "B", "C"]) + _check_legend_marker(ax, expected_markers=[".", ".", "."]) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py new file mode 100644 index 0000000000000000000000000000000000000000..4d8d8fa4cdee38d568d099019e89114fb0cdb4e9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py @@ -0,0 +1,752 @@ +""" Test cases for DataFrame.plot """ + +import string + +import numpy as np +import pytest + +from pandas.compat import is_platform_linux +from pandas.compat.numpy import np_version_gte1p24 + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_box_return_type, + _check_legend_labels, + _check_ticks_props, + _check_visible, + _flatten_visible, +) + +from pandas.io.formats.printing import pprint_thing + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestDataFramePlotsSubplots: + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"]) + def test_subplots(self, kind): + df = DataFrame( 
+ np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True) + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + assert axes.shape == (3,) + + for ax, column in zip(axes, df.columns): + _check_legend_labels(ax, labels=[pprint_thing(column)]) + + for ax in axes[:-2]: + _check_visible(ax.xaxis) # xaxis must be visible for grid + _check_visible(ax.get_xticklabels(), visible=False) + if kind != "bar": + # change https://github.com/pandas-dev/pandas/issues/26714 + _check_visible(ax.get_xticklabels(minor=True), visible=False) + _check_visible(ax.xaxis.get_label(), visible=False) + _check_visible(ax.get_yticklabels()) + + _check_visible(axes[-1].xaxis) + _check_visible(axes[-1].get_xticklabels()) + _check_visible(axes[-1].get_xticklabels(minor=True)) + _check_visible(axes[-1].xaxis.get_label()) + _check_visible(axes[-1].get_yticklabels()) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"]) + def test_subplots_no_share_x(self, kind): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + axes = df.plot(kind=kind, subplots=True, sharex=False) + for ax in axes: + _check_visible(ax.xaxis) + _check_visible(ax.get_xticklabels()) + _check_visible(ax.get_xticklabels(minor=True)) + _check_visible(ax.xaxis.get_label()) + _check_visible(ax.get_yticklabels()) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"]) + def test_subplots_no_legend(self, kind): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + axes = df.plot(kind=kind, subplots=True, legend=False) + for ax in axes: + assert ax.get_legend() is None + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_subplots_timeseries(self, kind): + idx = date_range(start="2014-07-01", freq="ME", periods=10) + df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx) + + axes = df.plot(kind=kind, subplots=True, sharex=True) + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + + for ax in axes[:-2]: + # GH 7801 + _check_visible(ax.xaxis) # xaxis must be visible for grid + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + _check_visible(ax.xaxis.get_label(), visible=False) + _check_visible(ax.get_yticklabels()) + + _check_visible(axes[-1].xaxis) + _check_visible(axes[-1].get_xticklabels()) + _check_visible(axes[-1].get_xticklabels(minor=True)) + _check_visible(axes[-1].xaxis.get_label()) + _check_visible(axes[-1].get_yticklabels()) + _check_ticks_props(axes, xrot=0) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_subplots_timeseries_rot(self, kind): + idx = date_range(start="2014-07-01", freq="ME", periods=10) + df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx) + axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7) + for ax in axes: + _check_visible(ax.xaxis) + _check_visible(ax.get_xticklabels()) + _check_visible(ax.get_xticklabels(minor=True)) + _check_visible(ax.xaxis.get_label()) + _check_visible(ax.get_yticklabels()) + _check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7) + + @pytest.mark.parametrize( + "col", ["numeric", "timedelta", "datetime_no_tz", "datetime_all_tz"] + ) + def test_subplots_timeseries_y_axis(self, col): + # GH16953 + data = { + "numeric": np.array([1, 2, 5]), + "timedelta": [ + pd.Timedelta(-10, 
unit="s"), + pd.Timedelta(10, unit="m"), + pd.Timedelta(10, unit="h"), + ], + "datetime_no_tz": [ + pd.to_datetime("2017-08-01 00:00:00"), + pd.to_datetime("2017-08-01 02:00:00"), + pd.to_datetime("2017-08-02 00:00:00"), + ], + "datetime_all_tz": [ + pd.to_datetime("2017-08-01 00:00:00", utc=True), + pd.to_datetime("2017-08-01 02:00:00", utc=True), + pd.to_datetime("2017-08-02 00:00:00", utc=True), + ], + "text": ["This", "should", "fail"], + } + testdata = DataFrame(data) + + ax = testdata.plot(y=col) + result = ax.get_lines()[0].get_data()[1] + expected = testdata[col].values + assert (result == expected).all() + + def test_subplots_timeseries_y_text_error(self): + # GH16953 + data = { + "numeric": np.array([1, 2, 5]), + "text": ["This", "should", "fail"], + } + testdata = DataFrame(data) + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + testdata.plot(y="text") + + @pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz") + def test_subplots_timeseries_y_axis_not_supported(self): + """ + This test will fail for: + period: + since period isn't yet implemented in ``select_dtypes`` + and because it will need a custom value converter + + tick formatter (as was done for x-axis plots) + + categorical: + because it will need a custom value converter + + tick formatter (also doesn't work for x-axis, as of now) + + datetime_mixed_tz: + because of the way how pandas handles ``Series`` of + ``datetime`` objects with different timezone, + generally converting ``datetime`` objects in a tz-aware + form could help with this problem + """ + data = { + "numeric": np.array([1, 2, 5]), + "period": [ + pd.Period("2017-08-01 00:00:00", freq="H"), + pd.Period("2017-08-01 02:00", freq="H"), + pd.Period("2017-08-02 00:00:00", freq="H"), + ], + "categorical": pd.Categorical( + ["c", "b", "a"], categories=["a", "b", "c"], ordered=False + ), + "datetime_mixed_tz": [ + pd.to_datetime("2017-08-01 00:00:00", utc=True), + pd.to_datetime("2017-08-01 02:00:00"), + pd.to_datetime("2017-08-02 00:00:00"), + ], + } + testdata = DataFrame(data) + ax_period = testdata.plot(x="numeric", y="period") + assert ( + ax_period.get_lines()[0].get_data()[1] == testdata["period"].values + ).all() + ax_categorical = testdata.plot(x="numeric", y="categorical") + assert ( + ax_categorical.get_lines()[0].get_data()[1] + == testdata["categorical"].values + ).all() + ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz") + assert ( + ax_datetime_mixed_tz.get_lines()[0].get_data()[1] + == testdata["datetime_mixed_tz"].values + ).all() + + @pytest.mark.parametrize( + "layout, exp_layout", + [ + [(2, 2), (2, 2)], + [(-1, 2), (2, 2)], + [(2, -1), (2, 2)], + [(1, 4), (1, 4)], + [(-1, 4), (1, 4)], + [(4, -1), (4, 1)], + ], + ) + def test_subplots_layout_multi_column(self, layout, exp_layout): + # GH 6667 + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + axes = df.plot(subplots=True, layout=layout) + _check_axes_shape(axes, axes_num=3, layout=exp_layout) + assert axes.shape == exp_layout + + def test_subplots_layout_multi_column_error(self): + # GH 6667 + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + msg = "Layout of 1x1 must be larger than required size 3" + + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, layout=(1, 1)) + + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + 
df.plot(subplots=True, layout=(-1, -1)) + + @pytest.mark.parametrize( + "kwargs, expected_axes_num, expected_layout, expected_shape", + [ + ({}, 1, (1, 1), (1,)), + ({"layout": (3, 3)}, 1, (3, 3), (3, 3)), + ], + ) + def test_subplots_layout_single_column( + self, kwargs, expected_axes_num, expected_layout, expected_shape + ): + # GH 6667 + df = DataFrame( + np.random.default_rng(2).random((10, 1)), + index=list(string.ascii_letters[:10]), + ) + axes = df.plot(subplots=True, **kwargs) + _check_axes_shape( + axes, + axes_num=expected_axes_num, + layout=expected_layout, + ) + assert axes.shape == expected_shape + + @pytest.mark.slow + @pytest.mark.parametrize("idx", [range(5), date_range("1/1/2000", periods=5)]) + def test_subplots_warnings(self, idx): + # GH 9464 + with tm.assert_produces_warning(None): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 4)), index=idx) + df.plot(subplots=True, layout=(3, 2)) + + def test_subplots_multiple_axes(self): + # GH 5353, 6970, GH 7069 + fig, axes = mpl.pyplot.subplots(2, 3) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + assert returned.shape == (3,) + assert returned[0].figure is fig + # draw on second row + returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + assert returned.shape == (3,) + assert returned[0].figure is fig + _check_axes_shape(axes, axes_num=6, layout=(2, 3)) + + def test_subplots_multiple_axes_error(self): + # GH 5353, 6970, GH 7069 + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + msg = "The number of passed axes must be 3, the same as the output plot" + _, axes = mpl.pyplot.subplots(2, 3) + + with pytest.raises(ValueError, match=msg): + # pass different number of axes from required + df.plot(subplots=True, ax=axes) + + @pytest.mark.parametrize( + "layout, exp_layout", + [ + [(2, 1), (2, 2)], + [(2, -1), (2, 2)], + [(-1, 2), (2, 2)], + ], + ) + def test_subplots_multiple_axes_2_dim(self, layout, exp_layout): + # GH 5353, 6970, GH 7069 + # pass 2-dim axes and invalid layout + # invalid lauout should not affect to input and return value + # (show warning is tested in + # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes + _, axes = mpl.pyplot.subplots(2, 2) + df = DataFrame( + np.random.default_rng(2).random((10, 4)), + index=list(string.ascii_letters[:10]), + ) + with tm.assert_produces_warning(UserWarning): + returned = df.plot( + subplots=True, ax=axes, layout=layout, sharex=False, sharey=False + ) + _check_axes_shape(returned, axes_num=4, layout=exp_layout) + assert returned.shape == (4,) + + def test_subplots_multiple_axes_single_col(self): + # GH 5353, 6970, GH 7069 + # single column + _, axes = mpl.pyplot.subplots(1, 1) + df = DataFrame( + np.random.default_rng(2).random((10, 1)), + index=list(string.ascii_letters[:10]), + ) + + axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + assert axes.shape == (1,) + + def test_subplots_ts_share_axes(self): + # GH 3964 + _, axes = mpl.pyplot.subplots(3, 3, sharex=True, sharey=True) + mpl.pyplot.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 9)), + index=date_range(start="2014-07-01", 
freq="ME", periods=10), + ) + for i, ax in enumerate(axes.ravel()): + df[i].plot(ax=ax, fontsize=5) + + # Rows other than bottom should not be visible + for ax in axes[0:-1].ravel(): + _check_visible(ax.get_xticklabels(), visible=False) + + # Bottom row should be visible + for ax in axes[-1].ravel(): + _check_visible(ax.get_xticklabels(), visible=True) + + # First column should be visible + for ax in axes[[0, 1, 2], [0]].ravel(): + _check_visible(ax.get_yticklabels(), visible=True) + + # Other columns should not be visible + for ax in axes[[0, 1, 2], [1]].ravel(): + _check_visible(ax.get_yticklabels(), visible=False) + for ax in axes[[0, 1, 2], [2]].ravel(): + _check_visible(ax.get_yticklabels(), visible=False) + + def test_subplots_sharex_axes_existing_axes(self): + # GH 9158 + d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]} + df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14")) + + axes = df[["A", "B"]].plot(subplots=True) + df["C"].plot(ax=axes[0], secondary_y=True) + + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[1].get_xticklabels(), visible=True) + for ax in axes.ravel(): + _check_visible(ax.get_yticklabels(), visible=True) + + def test_subplots_dup_columns(self): + # GH 10962 + df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa")) + axes = df.plot(subplots=True) + for ax in axes: + _check_legend_labels(ax, labels=["a"]) + assert len(ax.lines) == 1 + + def test_subplots_dup_columns_secondary_y(self): + # GH 10962 + df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa")) + axes = df.plot(subplots=True, secondary_y="a") + for ax in axes: + # (right) is only attached when subplots=False + _check_legend_labels(ax, labels=["a"]) + assert len(ax.lines) == 1 + + def test_subplots_dup_columns_secondary_y_no_subplot(self): + # GH 10962 + df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa")) + ax = df.plot(secondary_y="a") + _check_legend_labels(ax, labels=["a (right)"] * 5) + assert len(ax.lines) == 0 + assert len(ax.right_ax.lines) == 5 + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + def test_bar_log_no_subplots(self): + # GH3254, GH3298 matplotlib/matplotlib#1882, #1892 + # regressions in 1.2.1 + expected = np.array([0.1, 1.0, 10.0, 100]) + + # no subplots + df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5)) + ax = df.plot.bar(grid=True, log=True) + tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected) + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + def test_bar_log_subplots(self): + expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4]) + + ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar( + log=True, subplots=True + ) + + tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected) + tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected) + + def test_boxplot_subplots_return_type_default(self, hist_df): + df = hist_df + + # normal style: return_type=None + result = df.plot.box(subplots=True) + assert isinstance(result, Series) + _check_box_return_type( + result, None, expected_keys=["height", "weight", "category"] + ) + + @pytest.mark.parametrize("rt", ["dict", "axes", "both"]) + def test_boxplot_subplots_return_type(self, hist_df, rt): + df = hist_df + returned = df.plot.box(return_type=rt, subplots=True) + 
_check_box_return_type( + returned, + rt, + expected_keys=["height", "weight", "category"], + check_ax_title=False, + ) + + def test_df_subplots_patterns_minorticks(self): + # GH 10657 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=date_range("1/1/2000", periods=10), + columns=list("AB"), + ) + + # shared subplots + _, axes = plt.subplots(2, 1, sharex=True) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + # xaxis of 1st ax must be hidden + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[0].get_xticklabels(minor=True), visible=False) + _check_visible(axes[1].get_xticklabels(), visible=True) + _check_visible(axes[1].get_xticklabels(minor=True), visible=True) + + def test_df_subplots_patterns_minorticks_1st_ax_hidden(self): + # GH 10657 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=date_range("1/1/2000", periods=10), + columns=list("AB"), + ) + _, axes = plt.subplots(2, 1) + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + # xaxis of 1st ax must be hidden + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[0].get_xticklabels(minor=True), visible=False) + _check_visible(axes[1].get_xticklabels(), visible=True) + _check_visible(axes[1].get_xticklabels(minor=True), visible=True) + + def test_df_subplots_patterns_minorticks_not_shared(self): + # GH 10657 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=date_range("1/1/2000", periods=10), + columns=list("AB"), + ) + # not shared + _, axes = plt.subplots(2, 1) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + def test_subplots_sharex_false(self): + # test when sharex is set to False, two plots should have different + # labels, GH 25160 + df = DataFrame(np.random.default_rng(2).random((10, 2))) + df.iloc[5:, 1] = np.nan + df.iloc[:5, 0] = np.nan + + _, axs = mpl.pyplot.subplots(2, 1) + df.plot.line(ax=axs, subplots=True, sharex=False) + + expected_ax1 = np.arange(4.5, 10, 0.5) + expected_ax2 = np.arange(-0.5, 5, 0.5) + + tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1) + tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2) + + def test_subplots_constrained_layout(self): + # GH 25261 + idx = date_range(start="now", periods=10) + df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx) + kwargs = {} + if hasattr(mpl.pyplot.Figure, "get_constrained_layout"): + kwargs["constrained_layout"] = True + _, axes = mpl.pyplot.subplots(2, **kwargs) + with tm.assert_produces_warning(None): + df.plot(ax=axes[0]) + with tm.ensure_clean(return_filelike=True) as path: + mpl.pyplot.savefig(path) + + @pytest.mark.parametrize( + "index_name, old_label, new_label", + [ + (None, "", "new"), + ("old", "old", "new"), + (None, "", ""), + (None, "", 1), + (None, "", [1, 2]), + ], + ) + @pytest.mark.parametrize("kind", ["line", "area", "bar"]) + def test_xlabel_ylabel_dataframe_subplots( + self, kind, index_name, old_label, new_label + ): + # GH 9093 + df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) + df.index.name = index_name 
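+ # new_label can be an int or a list, so the assertions compare labels via str(new_label)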
+ + # default is the ylabel is not shown and xlabel is index name + axes = df.plot(kind=kind, subplots=True) + assert all(ax.get_ylabel() == "" for ax in axes) + assert all(ax.get_xlabel() == old_label for ax in axes) + + # old xlabel will be overridden and assigned ylabel will be used as ylabel + axes = df.plot(kind=kind, ylabel=new_label, xlabel=new_label, subplots=True) + assert all(ax.get_ylabel() == str(new_label) for ax in axes) + assert all(ax.get_xlabel() == str(new_label) for ax in axes) + + @pytest.mark.parametrize( + "kwargs", + [ + # stacked center + {"kind": "bar", "stacked": True}, + {"kind": "bar", "stacked": True, "width": 0.9}, + {"kind": "barh", "stacked": True}, + {"kind": "barh", "stacked": True, "width": 0.9}, + # center + {"kind": "bar", "stacked": False}, + {"kind": "bar", "stacked": False, "width": 0.9}, + {"kind": "barh", "stacked": False}, + {"kind": "barh", "stacked": False, "width": 0.9}, + # subplots center + {"kind": "bar", "subplots": True}, + {"kind": "bar", "subplots": True, "width": 0.9}, + {"kind": "barh", "subplots": True}, + {"kind": "barh", "subplots": True, "width": 0.9}, + # align edge + {"kind": "bar", "stacked": True, "align": "edge"}, + {"kind": "bar", "stacked": True, "width": 0.9, "align": "edge"}, + {"kind": "barh", "stacked": True, "align": "edge"}, + {"kind": "barh", "stacked": True, "width": 0.9, "align": "edge"}, + {"kind": "bar", "stacked": False, "align": "edge"}, + {"kind": "bar", "stacked": False, "width": 0.9, "align": "edge"}, + {"kind": "barh", "stacked": False, "align": "edge"}, + {"kind": "barh", "stacked": False, "width": 0.9, "align": "edge"}, + {"kind": "bar", "subplots": True, "align": "edge"}, + {"kind": "bar", "subplots": True, "width": 0.9, "align": "edge"}, + {"kind": "barh", "subplots": True, "align": "edge"}, + {"kind": "barh", "subplots": True, "width": 0.9, "align": "edge"}, + ], + ) + def test_bar_align_multiple_columns(self, kwargs): + # GH2157 + df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5)) + self._check_bar_alignment(df, **kwargs) + + @pytest.mark.parametrize( + "kwargs", + [ + {"kind": "bar", "stacked": False}, + {"kind": "bar", "stacked": True}, + {"kind": "barh", "stacked": False}, + {"kind": "barh", "stacked": True}, + {"kind": "bar", "subplots": True}, + {"kind": "barh", "subplots": True}, + ], + ) + def test_bar_align_single_column(self, kwargs): + df = DataFrame(np.random.default_rng(2).standard_normal(5)) + self._check_bar_alignment(df, **kwargs) + + @pytest.mark.parametrize( + "kwargs", + [ + {"kind": "bar", "stacked": False}, + {"kind": "bar", "stacked": True}, + {"kind": "barh", "stacked": False}, + {"kind": "barh", "stacked": True}, + {"kind": "bar", "subplots": True}, + {"kind": "barh", "subplots": True}, + ], + ) + def test_bar_barwidth_position(self, kwargs): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + self._check_bar_alignment(df, width=0.9, position=0.2, **kwargs) + + @pytest.mark.parametrize("w", [1, 1.0]) + def test_bar_barwidth_position_int(self, w): + # GH 12979 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.bar(stacked=True, width=w) + ticks = ax.xaxis.get_ticklocs() + tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4])) + assert ax.get_xlim() == (-0.75, 4.75) + # check left-edge of bars + assert ax.patches[0].get_x() == -0.5 + assert ax.patches[-1].get_x() == 3.5 + + @pytest.mark.parametrize( + "kind, kwargs", + [ + ["bar", {"stacked": True}], + ["barh", {"stacked": False}], + ["barh", {"stacked": 
True}], + ["bar", {"subplots": True}], + ["barh", {"subplots": True}], + ], + ) + def test_bar_barwidth_position_int_width_1(self, kind, kwargs): + # GH 12979 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + self._check_bar_alignment(df, kind=kind, width=1, **kwargs) + + def _check_bar_alignment( + self, + df, + kind="bar", + stacked=False, + subplots=False, + align="center", + width=0.5, + position=0.5, + ): + axes = df.plot( + kind=kind, + stacked=stacked, + subplots=subplots, + align=align, + width=width, + position=position, + grid=True, + ) + + axes = _flatten_visible(axes) + + for ax in axes: + if kind == "bar": + axis = ax.xaxis + ax_min, ax_max = ax.get_xlim() + min_edge = min(p.get_x() for p in ax.patches) + max_edge = max(p.get_x() + p.get_width() for p in ax.patches) + elif kind == "barh": + axis = ax.yaxis + ax_min, ax_max = ax.get_ylim() + min_edge = min(p.get_y() for p in ax.patches) + max_edge = max(p.get_y() + p.get_height() for p in ax.patches) + else: + raise ValueError + + # GH 7498 + # compare margins between lim and bar edges + tm.assert_almost_equal(ax_min, min_edge - 0.25) + tm.assert_almost_equal(ax_max, max_edge + 0.25) + + p = ax.patches[0] + if kind == "bar" and (stacked is True or subplots is True): + edge = p.get_x() + center = edge + p.get_width() * position + elif kind == "bar" and stacked is False: + center = p.get_x() + p.get_width() * len(df.columns) * position + edge = p.get_x() + elif kind == "barh" and (stacked is True or subplots is True): + center = p.get_y() + p.get_height() * position + edge = p.get_y() + elif kind == "barh" and stacked is False: + center = p.get_y() + p.get_height() * len(df.columns) * position + edge = p.get_y() + else: + raise ValueError + + # Check the ticks locates on integer + assert (axis.get_ticklocs() == np.arange(len(df))).all() + + if align == "center": + # Check whether the bar locates on center + tm.assert_almost_equal(axis.get_ticklocs()[0], center) + elif align == "edge": + # Check whether the bar's edge starts from the tick + tm.assert_almost_equal(axis.get_ticklocs()[0], edge) + else: + raise ValueError + + return axes diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py new file mode 100644 index 0000000000000000000000000000000000000000..a9250fa8347cc04fa34c28b016e1fb27d837284f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py @@ -0,0 +1,342 @@ +import re + +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_plot_works, + get_x_axis, + get_y_axis, +) + +pytest.importorskip("matplotlib") + + +@pytest.fixture +def hist_df(): + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), columns=["A", "B"] + ) + df["C"] = np.random.default_rng(2).choice(["a", "b", "c"], 30) + df["D"] = np.random.default_rng(2).choice(["a", "b", "c"], 30) + return df + + +class TestHistWithBy: + @pytest.mark.slow + @pytest.mark.parametrize( + "by, column, titles, legends", + [ + ("C", "A", ["a", "b", "c"], [["A"]] * 3), + ("C", ["A", "B"], ["a", "b", "c"], [["A", "B"]] * 3), + ("C", None, ["a", "b", "c"], [["A", "B"]] * 3), + ( + ["C", "D"], + "A", + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A"]] * 3, + ), + ( + ["C", "D"], + ["A", "B"], + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A", 
"B"]] * 3, + ), + ( + ["C", "D"], + None, + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A", "B"]] * 3, + ), + ], + ) + def test_hist_plot_by_argument(self, by, column, titles, legends, hist_df): + # GH 15079 + axes = _check_plot_works( + hist_df.plot.hist, column=column, by=by, default_axes=True + ) + result_titles = [ax.get_title() for ax in axes] + result_legends = [ + [legend.get_text() for legend in ax.get_legend().texts] for ax in axes + ] + + assert result_legends == legends + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column, titles, legends", + [ + (0, "A", ["a", "b", "c"], [["A"]] * 3), + (0, None, ["a", "b", "c"], [["A", "B"]] * 3), + ( + [0, "D"], + "A", + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A"]] * 3, + ), + ], + ) + def test_hist_plot_by_0(self, by, column, titles, legends, hist_df): + # GH 15079 + df = hist_df.copy() + df = df.rename(columns={"C": 0}) + + axes = _check_plot_works(df.plot.hist, default_axes=True, column=column, by=by) + result_titles = [ax.get_title() for ax in axes] + result_legends = [ + [legend.get_text() for legend in ax.get_legend().texts] for ax in axes + ] + + assert result_legends == legends + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column", + [ + ([], ["A"]), + ([], ["A", "B"]), + ((), None), + ((), ["A", "B"]), + ], + ) + def test_hist_plot_empty_list_string_tuple_by(self, by, column, hist_df): + # GH 15079 + msg = "No group keys passed" + with pytest.raises(ValueError, match=msg): + _check_plot_works( + hist_df.plot.hist, default_axes=True, column=column, by=by + ) + + @pytest.mark.slow + @pytest.mark.parametrize( + "by, column, layout, axes_num", + [ + (["C"], "A", (2, 2), 3), + ("C", "A", (2, 2), 3), + (["C"], ["A"], (1, 3), 3), + ("C", None, (3, 1), 3), + ("C", ["A", "B"], (3, 1), 3), + (["C", "D"], "A", (9, 1), 3), + (["C", "D"], "A", (3, 3), 3), + (["C", "D"], ["A"], (5, 2), 3), + (["C", "D"], ["A", "B"], (9, 1), 3), + (["C", "D"], None, (9, 1), 3), + (["C", "D"], ["A", "B"], (5, 2), 3), + ], + ) + def test_hist_plot_layout_with_by(self, by, column, layout, axes_num, hist_df): + # GH 15079 + # _check_plot_works adds an ax so catch warning. 
see GH #13188 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + hist_df.plot.hist, column=column, by=by, layout=layout + ) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + @pytest.mark.parametrize( + "msg, by, layout", + [ + ("larger than required size", ["C", "D"], (1, 1)), + (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)), + ("At least one dimension of layout must be positive", "C", (-1, -1)), + ], + ) + def test_hist_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df): + # GH 15079, test if error is raised when invalid layout is given + + with pytest.raises(ValueError, match=msg): + hist_df.plot.hist(column=["A", "B"], by=by, layout=layout) + + @pytest.mark.slow + def test_axis_share_x_with_by(self, hist_df): + # GH 15079 + ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharex=True) + + # share x + assert get_x_axis(ax1).joined(ax1, ax2) + assert get_x_axis(ax2).joined(ax1, ax2) + assert get_x_axis(ax3).joined(ax1, ax3) + assert get_x_axis(ax3).joined(ax2, ax3) + + # don't share y + assert not get_y_axis(ax1).joined(ax1, ax2) + assert not get_y_axis(ax2).joined(ax1, ax2) + assert not get_y_axis(ax3).joined(ax1, ax3) + assert not get_y_axis(ax3).joined(ax2, ax3) + + @pytest.mark.slow + def test_axis_share_y_with_by(self, hist_df): + # GH 15079 + ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharey=True) + + # share y + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + assert get_y_axis(ax3).joined(ax1, ax3) + assert get_y_axis(ax3).joined(ax2, ax3) + + # don't share x + assert not get_x_axis(ax1).joined(ax1, ax2) + assert not get_x_axis(ax2).joined(ax1, ax2) + assert not get_x_axis(ax3).joined(ax1, ax3) + assert not get_x_axis(ax3).joined(ax2, ax3) + + @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)]) + def test_figure_shape_hist_with_by(self, figsize, hist_df): + # GH 15079 + axes = hist_df.plot.hist(column="A", by="C", figsize=figsize) + _check_axes_shape(axes, axes_num=3, figsize=figsize) + + +class TestBoxWithBy: + @pytest.mark.parametrize( + "by, column, titles, xticklabels", + [ + ("C", "A", ["A"], [["a", "b", "c"]]), + ( + ["C", "D"], + "A", + ["A"], + [ + [ + "(a, a)", + "(b, b)", + "(c, c)", + ] + ], + ), + ("C", ["A", "B"], ["A", "B"], [["a", "b", "c"]] * 2), + ( + ["C", "D"], + ["A", "B"], + ["A", "B"], + [ + [ + "(a, a)", + "(b, b)", + "(c, c)", + ] + ] + * 2, + ), + (["C"], None, ["A", "B"], [["a", "b", "c"]] * 2), + ], + ) + def test_box_plot_by_argument(self, by, column, titles, xticklabels, hist_df): + # GH 15079 + axes = _check_plot_works( + hist_df.plot.box, default_axes=True, column=column, by=by + ) + result_titles = [ax.get_title() for ax in axes] + result_xticklabels = [ + [label.get_text() for label in ax.get_xticklabels()] for ax in axes + ] + + assert result_xticklabels == xticklabels + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column, titles, xticklabels", + [ + (0, "A", ["A"], [["a", "b", "c"]]), + ( + [0, "D"], + "A", + ["A"], + [ + [ + "(a, a)", + "(b, b)", + "(c, c)", + ] + ], + ), + (0, None, ["A", "B"], [["a", "b", "c"]] * 2), + ], + ) + def test_box_plot_by_0(self, by, column, titles, xticklabels, hist_df): + # GH 15079 + df = hist_df.copy() + df = df.rename(columns={"C": 0}) + + axes = _check_plot_works(df.plot.box, default_axes=True, column=column, by=by) + result_titles = [ax.get_title() for ax in axes] + result_xticklabels = [ + [label.get_text() for label in 
ax.get_xticklabels()] for ax in axes + ] + + assert result_xticklabels == xticklabels + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column", + [ + ([], ["A"]), + ((), "A"), + ([], None), + ((), ["A", "B"]), + ], + ) + def test_box_plot_with_none_empty_list_by(self, by, column, hist_df): + # GH 15079 + msg = "No group keys passed" + with pytest.raises(ValueError, match=msg): + _check_plot_works(hist_df.plot.box, default_axes=True, column=column, by=by) + + @pytest.mark.slow + @pytest.mark.parametrize( + "by, column, layout, axes_num", + [ + (["C"], "A", (1, 1), 1), + ("C", "A", (1, 1), 1), + ("C", None, (2, 1), 2), + ("C", ["A", "B"], (1, 2), 2), + (["C", "D"], "A", (1, 1), 1), + (["C", "D"], None, (1, 2), 2), + ], + ) + def test_box_plot_layout_with_by(self, by, column, layout, axes_num, hist_df): + # GH 15079 + axes = _check_plot_works( + hist_df.plot.box, default_axes=True, column=column, by=by, layout=layout + ) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + @pytest.mark.parametrize( + "msg, by, layout", + [ + ("larger than required size", ["C", "D"], (1, 1)), + (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)), + ("At least one dimension of layout must be positive", "C", (-1, -1)), + ], + ) + def test_box_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df): + # GH 15079, test if error is raised when invalid layout is given + + with pytest.raises(ValueError, match=msg): + hist_df.plot.box(column=["A", "B"], by=by, layout=layout) + + @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)]) + def test_figure_shape_hist_with_by(self, figsize, hist_df): + # GH 15079 + axes = hist_df.plot.box(column="A", by="C", figsize=figsize) + _check_axes_shape(axes, axes_num=1, figsize=figsize) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py new file mode 100644 index 0000000000000000000000000000000000000000..8a8643415ae12f96bbbd87ed85ff74f8813b07e4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py @@ -0,0 +1,98 @@ +import sys +import types + +import pytest + +import pandas.util._test_decorators as td + +import pandas + + +@pytest.fixture +def dummy_backend(): + db = types.ModuleType("pandas_dummy_backend") + setattr(db, "plot", lambda *args, **kwargs: "used_dummy") + return db + + +@pytest.fixture +def restore_backend(): + """Restore the plotting backend to matplotlib""" + with pandas.option_context("plotting.backend", "matplotlib"): + yield + + +def test_backend_is_not_module(): + msg = "Could not find plotting backend 'not_an_existing_module'." 
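+ # setting an unknown backend must raise and leave the option at "matplotlib"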
+ with pytest.raises(ValueError, match=msg): + pandas.set_option("plotting.backend", "not_an_existing_module") + + assert pandas.options.plotting.backend == "matplotlib" + + +def test_backend_is_correct(monkeypatch, restore_backend, dummy_backend): + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + + pandas.set_option("plotting.backend", "pandas_dummy_backend") + assert pandas.get_option("plotting.backend") == "pandas_dummy_backend" + assert ( + pandas.plotting._core._get_plot_backend("pandas_dummy_backend") is dummy_backend + ) + + +def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend, dummy_backend): + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + df = pandas.DataFrame([1, 2, 3]) + + assert pandas.get_option("plotting.backend") == "matplotlib" + assert df.plot(backend="pandas_dummy_backend") == "used_dummy" + + +def test_register_entrypoint(restore_backend, tmp_path, monkeypatch, dummy_backend): + monkeypatch.syspath_prepend(tmp_path) + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + + dist_info = tmp_path / "my_backend-0.0.0.dist-info" + dist_info.mkdir() + # entry_point name should not match module name - otherwise pandas will + # fall back to backend lookup by module name + (dist_info / "entry_points.txt").write_bytes( + b"[pandas_plotting_backends]\nmy_ep_backend = pandas_dummy_backend\n" + ) + + assert pandas.plotting._core._get_plot_backend("my_ep_backend") is dummy_backend + + with pandas.option_context("plotting.backend", "my_ep_backend"): + assert pandas.plotting._core._get_plot_backend() is dummy_backend + + +def test_setting_backend_without_plot_raises(monkeypatch): + # GH-28163 + module = types.ModuleType("pandas_plot_backend") + monkeypatch.setitem(sys.modules, "pandas_plot_backend", module) + + assert pandas.options.plotting.backend == "matplotlib" + with pytest.raises( + ValueError, match="Could not find plotting backend 'pandas_plot_backend'." + ): + pandas.set_option("plotting.backend", "pandas_plot_backend") + + assert pandas.options.plotting.backend == "matplotlib" + + +@td.skip_if_installed("matplotlib") +def test_no_matplotlib_ok(): + msg = ( + 'matplotlib is required for plotting when the default backend "matplotlib" is ' + "selected." 
+ ) + with pytest.raises(ImportError, match=msg): + pandas.plotting._core._get_plot_backend("matplotlib") + + +def test_extra_kinds_ok(monkeypatch, restore_backend, dummy_backend): + # https://github.com/pandas-dev/pandas/pull/28647 + monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend) + pandas.set_option("plotting.backend", "pandas_dummy_backend") + df = pandas.DataFrame({"A": [1, 2, 3]}) + df.plot(kind="not a real kind") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py new file mode 100644 index 0000000000000000000000000000000000000000..76f7fa1f22eec4bdb7464619226352c918d31a02 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py @@ -0,0 +1,761 @@ +""" Test cases for .boxplot method """ + +import itertools +import string + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, + date_range, + plotting, + timedelta_range, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_box_return_type, + _check_plot_works, + _check_ticks_props, + _check_visible, +) + +from pandas.io.formats.printing import pprint_thing + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +def _check_ax_limits(col, ax): + y_min, y_max = ax.get_ylim() + assert y_min <= col.min() + assert y_max >= col.max() + + +class TestDataFramePlots: + def test_stacked_boxplot_set_axis(self): + # GH2980 + import matplotlib.pyplot as plt + + n = 80 + df = DataFrame( + { + "Clinical": np.random.default_rng(2).choice([0, 1, 2, 3], n), + "Confirmed": np.random.default_rng(2).choice([0, 1, 2, 3], n), + "Discarded": np.random.default_rng(2).choice([0, 1, 2, 3], n), + }, + index=np.arange(0, n), + ) + ax = df.plot(kind="bar", stacked=True) + assert [int(x.get_text()) for x in ax.get_xticklabels()] == df.index.to_list() + ax.set_xticks(np.arange(0, 80, 10)) + plt.draw() # Update changes + assert [int(x.get_text()) for x in ax.get_xticklabels()] == list( + np.arange(0, 80, 10) + ) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs, warn", + [ + [{"return_type": "dict"}, None], + [{"column": ["one", "two"]}, None], + [{"column": ["one", "two"], "by": "indic"}, UserWarning], + [{"column": ["one"], "by": ["indic", "indic2"]}, None], + [{"by": "indic"}, UserWarning], + [{"by": ["indic", "indic2"]}, UserWarning], + [{"notch": 1}, None], + [{"by": "indic", "notch": 1}, UserWarning], + ], + ) + def test_boxplot_legacy1(self, kwargs, warn): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + df["indic"] = ["foo", "bar"] * 3 + df["indic2"] = ["foo", "bar", "foo"] * 2 + + # _check_plot_works can add an ax so catch warning. 
see GH #13188 + with tm.assert_produces_warning(warn, check_stacklevel=False): + _check_plot_works(df.boxplot, **kwargs) + + def test_boxplot_legacy1_series(self): + ser = Series(np.random.default_rng(2).standard_normal(6)) + _check_plot_works(plotting._core.boxplot, data=ser, return_type="dict") + + def test_boxplot_legacy2(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(df.boxplot, by="X") + + def test_boxplot_legacy2_with_ax(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + # When ax is supplied and required number of axes is 1, + # passed ax should be used: + _, ax = mpl.pyplot.subplots() + axes = df.boxplot("Col1", by="X", ax=ax) + ax_axes = ax.axes + assert ax_axes is axes + + def test_boxplot_legacy2_with_ax_return_type(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + fig, ax = mpl.pyplot.subplots() + axes = df.groupby("Y").boxplot(ax=ax, return_type="axes") + ax_axes = ax.axes + assert ax_axes is axes["A"] + + def test_boxplot_legacy2_with_multi_col(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + # Multiple columns with an ax argument should use same figure + fig, ax = mpl.pyplot.subplots() + with tm.assert_produces_warning(UserWarning): + axes = df.boxplot( + column=["Col1", "Col2"], by="X", ax=ax, return_type="axes" + ) + assert axes["Col1"].get_figure() is fig + + def test_boxplot_legacy2_by_none(self): + df = DataFrame( + np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"] + ) + df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"]) + df["Y"] = Series(["A"] * 10) + # When by is None, check that all relevant lines are present in the + # dict + _, ax = mpl.pyplot.subplots() + d = df.boxplot(ax=ax, return_type="dict") + lines = list(itertools.chain.from_iterable(d.values())) + assert len(ax.get_lines()) == len(lines) + + def test_boxplot_return_type_none(self, hist_df): + # GH 12216; return_type=None & by=None -> axes + result = hist_df.boxplot() + assert isinstance(result, mpl.pyplot.Axes) + + def test_boxplot_return_type_legacy(self): + # API change in https://github.com/pandas-dev/pandas/pull/7096 + + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + msg = "return_type must be {'axes', 'dict', 'both'}" + with pytest.raises(ValueError, match=msg): + df.boxplot(return_type="NOT_A_TYPE") + + result = df.boxplot() + _check_box_return_type(result, "axes") + + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_boxplot_return_type_legacy_return_type(self, return_type): + # API change in https://github.com/pandas-dev/pandas/pull/7096 + + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + with tm.assert_produces_warning(False): + 
result = df.boxplot(return_type=return_type) + _check_box_return_type(result, return_type) + + def test_boxplot_axis_limits(self, hist_df): + df = hist_df.copy() + df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0]) + # One full row + height_ax, weight_ax = df.boxplot(["height", "weight"], by="category") + _check_ax_limits(df["height"], height_ax) + _check_ax_limits(df["weight"], weight_ax) + assert weight_ax._sharey == height_ax + + def test_boxplot_axis_limits_two_rows(self, hist_df): + df = hist_df.copy() + df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0]) + # Two rows, one partial + p = df.boxplot(["height", "weight", "age"], by="category") + height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0] + dummy_ax = p[1, 1] + + _check_ax_limits(df["height"], height_ax) + _check_ax_limits(df["weight"], weight_ax) + _check_ax_limits(df["age"], age_ax) + assert weight_ax._sharey == height_ax + assert age_ax._sharey == height_ax + assert dummy_ax._sharey is None + + def test_boxplot_empty_column(self): + df = DataFrame(np.random.default_rng(2).standard_normal((20, 4))) + df.loc[:, 0] = np.nan + _check_plot_works(df.boxplot, return_type="axes") + + def test_figsize(self): + df = DataFrame( + np.random.default_rng(2).random((10, 5)), columns=["A", "B", "C", "D", "E"] + ) + result = df.boxplot(return_type="axes", figsize=(12, 8)) + assert result.figure.bbox_inches.width == 12 + assert result.figure.bbox_inches.height == 8 + + def test_fontsize(self): + df = DataFrame({"a": [1, 2, 3, 4, 5, 6]}) + _check_ticks_props(df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16) + + def test_boxplot_numeric_data(self): + # GH 22799 + df = DataFrame( + { + "a": date_range("2012-01-01", periods=100), + "b": np.random.default_rng(2).standard_normal(100), + "c": np.random.default_rng(2).standard_normal(100) + 2, + "d": date_range("2012-01-01", periods=100).astype(str), + "e": date_range("2012-01-01", periods=100, tz="UTC"), + "f": timedelta_range("1 days", periods=100), + } + ) + ax = df.plot(kind="box") + assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"] + + @pytest.mark.parametrize( + "colors_kwd, expected", + [ + ( + {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"}, + {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"}, + ), + ({"boxes": "r"}, {"boxes": "r"}), + ("r", {"boxes": "r", "whiskers": "r", "medians": "r", "caps": "r"}), + ], + ) + def test_color_kwd(self, colors_kwd, expected): + # GH: 26214 + df = DataFrame(np.random.default_rng(2).random((10, 2))) + result = df.boxplot(color=colors_kwd, return_type="dict") + for k, v in expected.items(): + assert result[k][0].get_color() == v + + @pytest.mark.parametrize( + "scheme,expected", + [ + ( + "dark_background", + { + "boxes": "#8dd3c7", + "whiskers": "#8dd3c7", + "medians": "#bfbbd9", + "caps": "#8dd3c7", + }, + ), + ( + "default", + { + "boxes": "#1f77b4", + "whiskers": "#1f77b4", + "medians": "#2ca02c", + "caps": "#1f77b4", + }, + ), + ], + ) + def test_colors_in_theme(self, scheme, expected): + # GH: 40769 + df = DataFrame(np.random.default_rng(2).random((10, 2))) + import matplotlib.pyplot as plt + + plt.style.use(scheme) + result = df.plot.box(return_type="dict") + for k, v in expected.items(): + assert result[k][0].get_color() == v + + @pytest.mark.parametrize( + "dict_colors, msg", + [({"boxes": "r", "invalid_key": "r"}, "invalid key 'invalid_key'")], + ) + def test_color_kwd_errors(self, dict_colors, msg): + # GH: 26214 + df = DataFrame(np.random.default_rng(2).random((10, 
2))) + with pytest.raises(ValueError, match=msg): + df.boxplot(color=dict_colors, return_type="dict") + + @pytest.mark.parametrize( + "props, expected", + [ + ("boxprops", "boxes"), + ("whiskerprops", "whiskers"), + ("capprops", "caps"), + ("medianprops", "medians"), + ], + ) + def test_specified_props_kwd(self, props, expected): + # GH 30346 + df = DataFrame({k: np.random.default_rng(2).random(10) for k in "ABC"}) + kwd = {props: {"color": "C1"}} + result = df.boxplot(return_type="dict", **kwd) + + assert result[expected][0].get_color() == "C1" + + @pytest.mark.parametrize("vert", [True, False]) + def test_plot_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + xlabel, ylabel = "x", "y" + ax = df.plot(kind="box", vert=vert, xlabel=xlabel, ylabel=ylabel) + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + + @pytest.mark.parametrize("vert", [True, False]) + def test_plot_box(self, vert): + # GH 54941 + rng = np.random.default_rng(2) + df1 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD")) + df2 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD")) + + xlabel, ylabel = "x", "y" + _, axs = plt.subplots(ncols=2, figsize=(10, 7), sharey=True) + df1.plot.box(ax=axs[0], vert=vert, xlabel=xlabel, ylabel=ylabel) + df2.plot.box(ax=axs[1], vert=vert, xlabel=xlabel, ylabel=ylabel) + for ax in axs: + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + mpl.pyplot.close() + + @pytest.mark.parametrize("vert", [True, False]) + def test_boxplot_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + xlabel, ylabel = "x", "y" + ax = df.boxplot(vert=vert, xlabel=xlabel, ylabel=ylabel) + assert ax.get_xlabel() == xlabel + assert ax.get_ylabel() == ylabel + + @pytest.mark.parametrize("vert", [True, False]) + def test_boxplot_group_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + xlabel, ylabel = "x", "y" + ax = df.boxplot(by="group", vert=vert, xlabel=xlabel, ylabel=ylabel) + for subplot in ax: + assert subplot.get_xlabel() == xlabel + assert subplot.get_ylabel() == ylabel + mpl.pyplot.close() + + @pytest.mark.parametrize("vert", [True, False]) + def test_boxplot_group_no_xlabel_ylabel(self, vert): + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(10), + "b": np.random.default_rng(2).standard_normal(10), + "group": np.random.default_rng(2).choice(["group1", "group2"], 10), + } + ) + ax = df.boxplot(by="group", vert=vert) + for subplot in ax: + target_label = subplot.get_xlabel() if vert else subplot.get_ylabel() + assert target_label == pprint_thing(["group"]) + mpl.pyplot.close() + + +class TestDataFrameGroupByPlots: + def test_boxplot_legacy1(self, hist_df): + grouped = hist_df.groupby(by="gender") + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(grouped.boxplot, return_type="axes") + _check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2)) + + def test_boxplot_legacy1_return_type(self, hist_df): + grouped = 
hist_df.groupby(by="gender") + axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes") + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.slow + def test_boxplot_legacy2(self): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + grouped = df.groupby(level=1) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(grouped.boxplot, return_type="axes") + _check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3)) + + @pytest.mark.slow + def test_boxplot_legacy2_return_type(self): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + grouped = df.groupby(level=1) + axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes") + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.parametrize( + "subplots, warn, axes_num, layout", + [[True, UserWarning, 3, (2, 2)], [False, None, 1, (1, 1)]], + ) + def test_boxplot_legacy3(self, subplots, warn, axes_num, layout): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + msg = "DataFrame.groupby with axis=1 is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + grouped = df.unstack(level=1).groupby(level=0, axis=1) + with tm.assert_produces_warning(warn, check_stacklevel=False): + axes = _check_plot_works( + grouped.boxplot, subplots=subplots, return_type="axes" + ) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + def test_grouped_plot_fignums(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + df = DataFrame({"height": height, "weight": weight, "gender": gender}) + gb = df.groupby("gender") + + res = gb.plot() + assert len(mpl.pyplot.get_fignums()) == 2 + assert len(res) == 2 + plt.close("all") + + res = gb.boxplot(return_type="axes") + assert len(mpl.pyplot.get_fignums()) == 1 + assert len(res) == 2 + + def test_grouped_plot_fignums_excluded_col(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + df = DataFrame({"height": height, "weight": weight, "gender": gender}) + # now works with GH 5610 as gender is excluded + df.groupby("gender").hist() + + @pytest.mark.slow + def test_grouped_box_return_type(self, hist_df): + df = hist_df + + # old style: return_type=None + result = df.boxplot(by="gender") + assert isinstance(result, np.ndarray) + _check_box_return_type( + result, None, expected_keys=["height", "weight", "category"] + ) + + @pytest.mark.slow + def test_grouped_box_return_type_groupby(self, hist_df): + df = hist_df + # now for groupby + result = df.groupby("gender").boxplot(return_type="dict") + _check_box_return_type(result, "dict", expected_keys=["Male", "Female"]) + + @pytest.mark.slow + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_grouped_box_return_type_arg(self, hist_df, return_type): + df = hist_df + + returned = df.groupby("classroom").boxplot(return_type=return_type) + 
_check_box_return_type(returned, return_type, expected_keys=["A", "B", "C"]) + + returned = df.boxplot(by="classroom", return_type=return_type) + _check_box_return_type( + returned, return_type, expected_keys=["height", "weight", "category"] + ) + + @pytest.mark.slow + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_grouped_box_return_type_arg_duplcate_cats(self, return_type): + columns2 = "X B C D A".split() + df2 = DataFrame( + np.random.default_rng(2).standard_normal((6, 5)), columns=columns2 + ) + categories2 = "A B".split() + df2["category"] = categories2 * 3 + + returned = df2.groupby("category").boxplot(return_type=return_type) + _check_box_return_type(returned, return_type, expected_keys=categories2) + + returned = df2.boxplot(by="category", return_type=return_type) + _check_box_return_type(returned, return_type, expected_keys=columns2) + + @pytest.mark.slow + def test_grouped_box_layout_too_small(self, hist_df): + df = hist_df + + msg = "Layout of 1x1 must be larger than required size 2" + with pytest.raises(ValueError, match=msg): + df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1)) + + @pytest.mark.slow + def test_grouped_box_layout_needs_by(self, hist_df): + df = hist_df + msg = "The 'layout' keyword is not supported when 'by' is None" + with pytest.raises(ValueError, match=msg): + df.boxplot( + column=["height", "weight", "category"], + layout=(2, 1), + return_type="dict", + ) + + @pytest.mark.slow + def test_grouped_box_layout_positive_layout(self, hist_df): + df = hist_df + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "gb_key, axes_num, rows", + [["gender", 2, 1], ["category", 4, 2], ["classroom", 3, 2]], + ) + def test_grouped_box_layout_positive_layout_axes( + self, hist_df, gb_key, axes_num, rows + ): + df = hist_df + # _check_plot_works adds an ax so catch warning. 
see GH #13188 GH 6769 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works( + df.groupby(gb_key).boxplot, column="height", return_type="dict" + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=axes_num, layout=(rows, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "col, visible", [["height", False], ["weight", True], ["category", True]] + ) + def test_grouped_box_layout_visible(self, hist_df, col, visible): + df = hist_df + # GH 5897 + axes = df.boxplot( + column=["height", "weight", "category"], by="gender", return_type="axes" + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2)) + ax = axes[col] + _check_visible(ax.get_xticklabels(), visible=visible) + _check_visible([ax.xaxis.get_label()], visible=visible) + + @pytest.mark.slow + def test_grouped_box_layout_shape(self, hist_df): + df = hist_df + df.groupby("classroom").boxplot( + column=["height", "weight", "category"], return_type="dict" + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize("cols", [2, -1]) + def test_grouped_box_layout_works(self, hist_df, cols): + df = hist_df + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works( + df.groupby("category").boxplot, + column="height", + layout=(3, cols), + return_type="dict", + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(3, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize("rows, res", [[4, 4], [-1, 3]]) + def test_grouped_box_layout_axes_shape_rows(self, hist_df, rows, res): + df = hist_df + df.boxplot( + column=["height", "weight", "category"], by="gender", layout=(rows, 1) + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(res, 1)) + + @pytest.mark.slow + @pytest.mark.parametrize("cols, res", [[4, 4], [-1, 3]]) + def test_grouped_box_layout_axes_shape_cols_groupby(self, hist_df, cols, res): + df = hist_df + df.groupby("classroom").boxplot( + column=["height", "weight", "category"], + layout=(1, cols), + return_type="dict", + ) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(1, res)) + + @pytest.mark.slow + def test_grouped_box_multiple_axes(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + + # check warning to ignore sharex / sharey + # this check should be done in the first function which + # passes multiple axes to plot, hist or boxplot + # location should be changed if other test is added + # which has earlier alphabetical order + with tm.assert_produces_warning(UserWarning): + _, axes = mpl.pyplot.subplots(2, 2) + df.groupby("category").boxplot(column="height", return_type="axes", ax=axes) + _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(2, 2)) + + @pytest.mark.slow + def test_grouped_box_multiple_axes_on_fig(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + fig, axes = mpl.pyplot.subplots(2, 3) + with tm.assert_produces_warning(UserWarning): + returned = df.boxplot( + column=["height", "weight", "category"], + by="gender", + return_type="axes", + ax=axes[0], + ) + returned = np.array(list(returned.values)) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[0]) + assert returned[0].figure is fig + + # draw on second row + with tm.assert_produces_warning(UserWarning): + returned = df.groupby("classroom").boxplot( + column=["height", "weight", "category"], return_type="axes", ax=axes[1] + ) + returned = np.array(list(returned.values)) + _check_axes_shape(returned, axes_num=3, 
layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[1]) + assert returned[0].figure is fig + + @pytest.mark.slow + def test_grouped_box_multiple_axes_ax_error(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + msg = "The number of passed axes must be 3, the same as the output plot" + with pytest.raises(ValueError, match=msg): + fig, axes = mpl.pyplot.subplots(2, 3) + # pass different number of axes from required + with tm.assert_produces_warning(UserWarning): + axes = df.groupby("classroom").boxplot(ax=axes) + + def test_fontsize(self): + df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]}) + _check_ticks_props( + df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16 + ) + + @pytest.mark.parametrize( + "col, expected_xticklabel", + [ + ("v", ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]), + (["v"], ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]), + ("v1", ["(a, v1)", "(b, v1)", "(c, v1)", "(d, v1)", "(e, v1)"]), + ( + ["v", "v1"], + [ + "(a, v)", + "(a, v1)", + "(b, v)", + "(b, v1)", + "(c, v)", + "(c, v1)", + "(d, v)", + "(d, v1)", + "(e, v)", + "(e, v1)", + ], + ), + ( + None, + [ + "(a, v)", + "(a, v1)", + "(b, v)", + "(b, v1)", + "(c, v)", + "(c, v1)", + "(d, v)", + "(d, v1)", + "(e, v)", + "(e, v1)", + ], + ), + ], + ) + def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel): + # GH 16748 + df = DataFrame( + { + "cat": np.random.default_rng(2).choice(list("abcde"), 100), + "v": np.random.default_rng(2).random(100), + "v1": np.random.default_rng(2).random(100), + } + ) + grouped = df.groupby("cat") + + axes = _check_plot_works( + grouped.boxplot, subplots=False, column=col, return_type="axes" + ) + + result_xticklabel = [x.get_text() for x in axes.get_xticklabels()] + assert expected_xticklabel == result_xticklabel + + def test_groupby_boxplot_object(self, hist_df): + # GH 43480 + df = hist_df.astype("object") + grouped = df.groupby("gender") + msg = "boxplot method requires numerical columns, nothing to plot" + with pytest.raises(ValueError, match=msg): + _check_plot_works(grouped.boxplot, subplots=False) + + def test_boxplot_multiindex_column(self): + # GH 16748 + arrays = [ + ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], + ["one", "two", "one", "two", "one", "two", "one", "two"], + ] + tuples = list(zip(*arrays)) + index = MultiIndex.from_tuples(tuples, names=["first", "second"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 8)), + index=["A", "B", "C"], + columns=index, + ) + + col = [("bar", "one"), ("bar", "two")] + axes = _check_plot_works(df.boxplot, column=col, return_type="axes") + + expected_xticklabel = ["(bar, one)", "(bar, two)"] + result_xticklabel = [x.get_text() for x in axes.get_xticklabels()] + assert expected_xticklabel == result_xticklabel diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..20daf5935624843af3224f991497f84fa6639a0d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py @@ -0,0 +1,60 @@ +import pytest + +from pandas import DataFrame +from pandas.tests.plotting.common import ( + _check_plot_works, + _check_ticks_props, + _gen_two_subplots, +) + +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestCommon: + def test__check_ticks_props(self): + # GH 34768 + df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]}) + ax = 
_check_plot_works(df.plot, rot=30) + ax.yaxis.set_tick_params(rotation=30) + msg = "expected 0.00000 but got " + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, xrot=0) + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, xlabelsize=0) + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, yrot=0) + with pytest.raises(AssertionError, match=msg): + _check_ticks_props(ax, ylabelsize=0) + + def test__gen_two_subplots_with_ax(self): + fig = plt.gcf() + gen = _gen_two_subplots(f=lambda **kwargs: None, fig=fig, ax="test") + # On the first yield, no subplot should be added since ax was passed + next(gen) + assert fig.get_axes() == [] + # On the second, the one axis should match fig.subplot(2, 1, 2) + next(gen) + axes = fig.get_axes() + assert len(axes) == 1 + subplot_geometry = list(axes[0].get_subplotspec().get_geometry()[:-1]) + subplot_geometry[-1] += 1 + assert subplot_geometry == [2, 1, 2] + + def test_colorbar_layout(self): + fig = plt.figure() + + axes = fig.subplot_mosaic( + """ + AB + CC + """ + ) + + x = [1, 2, 3] + y = [1, 2, 3] + + cs0 = axes["A"].scatter(x, y) + axes["B"].scatter(x, y) + + fig.colorbar(cs0, ax=[axes["A"], axes["B"]], location="right") + DataFrame(x).plot(ax=axes["C"]) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..f748d7c5fc758045fc5d3475b94e376a06f5269b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py @@ -0,0 +1,410 @@ +from datetime import ( + date, + datetime, +) +import subprocess +import sys + +import numpy as np +import pytest + +import pandas._config.config as cf + +from pandas._libs.tslibs import to_offset + +from pandas import ( + Index, + Period, + PeriodIndex, + Series, + Timestamp, + arrays, + date_range, +) +import pandas._testing as tm + +from pandas.plotting import ( + deregister_matplotlib_converters, + register_matplotlib_converters, +) +from pandas.tseries.offsets import ( + Day, + Micro, + Milli, + Second, +) + +try: + from pandas.plotting._matplotlib import converter +except ImportError: + # try / except, rather than skip, to avoid internal refactoring + # causing an improper skip + pass + +pytest.importorskip("matplotlib.pyplot") +dates = pytest.importorskip("matplotlib.dates") + + +@pytest.mark.single_cpu +def test_registry_mpl_resets(): + # Check that Matplotlib converters are properly reset (see issue #27481) + code = ( + "import matplotlib.units as units; " + "import matplotlib.dates as mdates; " + "n_conv = len(units.registry); " + "import pandas as pd; " + "pd.plotting.register_matplotlib_converters(); " + "pd.plotting.deregister_matplotlib_converters(); " + "assert len(units.registry) == n_conv" + ) + call = [sys.executable, "-c", code] + subprocess.check_output(call) + + +def test_timtetonum_accepts_unicode(): + assert converter.time2num("00:01") == converter.time2num("00:01") + + +class TestRegistration: + @pytest.mark.single_cpu + def test_dont_register_by_default(self): + # Run in subprocess to ensure a clean state + code = ( + "import matplotlib.units; " + "import pandas as pd; " + "units = dict(matplotlib.units.registry); " + "assert pd.Timestamp not in units" + ) + call = [sys.executable, "-c", code] + assert subprocess.check_call(call) == 0 + + def test_registering_no_warning(self): + plt = 
pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range("2017", periods=12)) + _, ax = plt.subplots() + + # Set to the "warn" state, in case this isn't the first test run + register_matplotlib_converters() + ax.plot(s.index, s.values) + plt.close() + + def test_pandas_plots_register(self): + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range("2017", periods=12)) + # Set to the "warn" state, in case this isn't the first test run + with tm.assert_produces_warning(None) as w: + s.plot() + + try: + assert len(w) == 0 + finally: + plt.close() + + def test_matplotlib_formatters(self): + units = pytest.importorskip("matplotlib.units") + + # Can't make any assertion about the start state. + # We we check that toggling converters off removes it, and toggling it + # on restores it. + + with cf.option_context("plotting.matplotlib.register_converters", True): + with cf.option_context("plotting.matplotlib.register_converters", False): + assert Timestamp not in units.registry + assert Timestamp in units.registry + + def test_option_no_warning(self): + pytest.importorskip("matplotlib.pyplot") + ctx = cf.option_context("plotting.matplotlib.register_converters", False) + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range("2017", periods=12)) + _, ax = plt.subplots() + + # Test without registering first, no warning + with ctx: + ax.plot(s.index, s.values) + + # Now test with registering + register_matplotlib_converters() + with ctx: + ax.plot(s.index, s.values) + plt.close() + + def test_registry_resets(self): + units = pytest.importorskip("matplotlib.units") + dates = pytest.importorskip("matplotlib.dates") + + # make a copy, to reset to + original = dict(units.registry) + + try: + # get to a known state + units.registry.clear() + date_converter = dates.DateConverter() + units.registry[datetime] = date_converter + units.registry[date] = date_converter + + register_matplotlib_converters() + assert units.registry[date] is not date_converter + deregister_matplotlib_converters() + assert units.registry[date] is date_converter + + finally: + # restore original stater + units.registry.clear() + for k, v in original.items(): + units.registry[k] = v + + +class TestDateTimeConverter: + @pytest.fixture + def dtc(self): + return converter.DatetimeConverter() + + def test_convert_accepts_unicode(self, dtc): + r1 = dtc.convert("2000-01-01 12:22", None, None) + r2 = dtc.convert("2000-01-01 12:22", None, None) + assert r1 == r2, "DatetimeConverter.convert should accept unicode" + + def test_conversion(self, dtc): + rs = dtc.convert(["2012-1-1"], None, None)[0] + xp = dates.date2num(datetime(2012, 1, 1)) + assert rs == xp + + rs = dtc.convert("2012-1-1", None, None) + assert rs == xp + + rs = dtc.convert(date(2012, 1, 1), None, None) + assert rs == xp + + rs = dtc.convert("2012-1-1", None, None) + assert rs == xp + + rs = dtc.convert(Timestamp("2012-1-1"), None, None) + assert rs == xp + + # also testing datetime64 dtype (GH8614) + rs = dtc.convert("2012-01-01", None, None) + assert rs == xp + + rs = dtc.convert("2012-01-01 00:00:00+0000", None, None) + assert rs == xp + + rs = dtc.convert( + np.array(["2012-01-01 00:00:00+0000", "2012-01-02 00:00:00+0000"]), + None, + None, + ) + assert rs[0] == xp + + # we have a tz-aware date (constructed to that when we turn to utc it + # is the same as our sample) + ts = Timestamp("2012-01-01").tz_localize("UTC").tz_convert("US/Eastern") + rs = dtc.convert(ts, None, None) + assert rs 
== xp + + rs = dtc.convert(ts.to_pydatetime(), None, None) + assert rs == xp + + rs = dtc.convert(Index([ts - Day(1), ts]), None, None) + assert rs[1] == xp + + rs = dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(), None, None) + assert rs[1] == xp + + def test_conversion_float(self, dtc): + rtol = 0.5 * 10**-9 + + rs = dtc.convert(Timestamp("2012-1-1 01:02:03", tz="UTC"), None, None) + xp = converter.mdates.date2num(Timestamp("2012-1-1 01:02:03", tz="UTC")) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + rs = dtc.convert( + Timestamp("2012-1-1 09:02:03", tz="Asia/Hong_Kong"), None, None + ) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + rs = dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + @pytest.mark.parametrize( + "values", + [ + [date(1677, 1, 1), date(1677, 1, 2)], + [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)], + ], + ) + def test_conversion_outofbounds_datetime(self, dtc, values): + # 2579 + rs = dtc.convert(values, None, None) + xp = converter.mdates.date2num(values) + tm.assert_numpy_array_equal(rs, xp) + rs = dtc.convert(values[0], None, None) + xp = converter.mdates.date2num(values[0]) + assert rs == xp + + @pytest.mark.parametrize( + "time,format_expected", + [ + (0, "00:00"), # time2num(datetime.time.min) + (86399.999999, "23:59:59.999999"), # time2num(datetime.time.max) + (90000, "01:00"), + (3723, "01:02:03"), + (39723.2, "11:02:03.200"), + ], + ) + def test_time_formatter(self, time, format_expected): + # issue 18478 + result = converter.TimeFormatter(None)(time) + assert result == format_expected + + @pytest.mark.parametrize("freq", ("B", "ms", "s")) + def test_dateindex_conversion(self, freq, dtc): + rtol = 10**-9 + dateindex = date_range("2020-01-01", periods=10, freq=freq) + rs = dtc.convert(dateindex, None, None) + xp = converter.mdates.date2num(dateindex._mpl_repr()) + tm.assert_almost_equal(rs, xp, rtol=rtol) + + @pytest.mark.parametrize("offset", [Second(), Milli(), Micro(50)]) + def test_resolution(self, offset, dtc): + # Matplotlib's time representation using floats cannot distinguish + # intervals smaller than ~10 microsecond in the common range of years. 
+ ts1 = Timestamp("2012-1-1") + ts2 = ts1 + offset + val1 = dtc.convert(ts1, None, None) + val2 = dtc.convert(ts2, None, None) + if not val1 < val2: + raise AssertionError(f"{val1} is not less than {val2}.") + + def test_convert_nested(self, dtc): + inner = [Timestamp("2017-01-01"), Timestamp("2017-01-02")] + data = [inner, inner] + result = dtc.convert(data, None, None) + expected = [dtc.convert(x, None, None) for x in data] + assert (np.array(result) == expected).all() + + +class TestPeriodConverter: + @pytest.fixture + def pc(self): + return converter.PeriodConverter() + + @pytest.fixture + def axis(self): + class Axis: + pass + + axis = Axis() + axis.freq = "D" + return axis + + def test_convert_accepts_unicode(self, pc, axis): + r1 = pc.convert("2012-1-1", None, axis) + r2 = pc.convert("2012-1-1", None, axis) + assert r1 == r2 + + def test_conversion(self, pc, axis): + rs = pc.convert(["2012-1-1"], None, axis)[0] + xp = Period("2012-1-1").ordinal + assert rs == xp + + rs = pc.convert("2012-1-1", None, axis) + assert rs == xp + + rs = pc.convert([date(2012, 1, 1)], None, axis)[0] + assert rs == xp + + rs = pc.convert(date(2012, 1, 1), None, axis) + assert rs == xp + + rs = pc.convert([Timestamp("2012-1-1")], None, axis)[0] + assert rs == xp + + rs = pc.convert(Timestamp("2012-1-1"), None, axis) + assert rs == xp + + rs = pc.convert("2012-01-01", None, axis) + assert rs == xp + + rs = pc.convert("2012-01-01 00:00:00+0000", None, axis) + assert rs == xp + + rs = pc.convert( + np.array( + ["2012-01-01 00:00:00", "2012-01-02 00:00:00"], + dtype="datetime64[ns]", + ), + None, + axis, + ) + assert rs[0] == xp + + def test_integer_passthrough(self, pc, axis): + # GH9012 + rs = pc.convert([0, 1], None, axis) + xp = [0, 1] + assert rs == xp + + def test_convert_nested(self, pc, axis): + data = ["2012-1-1", "2012-1-2"] + r1 = pc.convert([data, data], None, axis) + r2 = [pc.convert(data, None, axis) for _ in range(2)] + assert r1 == r2 + + +class TestTimeDeltaConverter: + """Test timedelta converter""" + + @pytest.mark.parametrize( + "x, decimal, format_expected", + [ + (0.0, 0, "00:00:00"), + (3972320000000, 1, "01:06:12.3"), + (713233432000000, 2, "8 days 06:07:13.43"), + (32423432000000, 4, "09:00:23.4320"), + ], + ) + def test_format_timedelta_ticks(self, x, decimal, format_expected): + tdc = converter.TimeSeries_TimedeltaFormatter + result = tdc.format_timedelta_ticks(x, pos=None, n_decimals=decimal) + assert result == format_expected + + @pytest.mark.parametrize("view_interval", [(1, 2), (2, 1)]) + def test_call_w_different_view_intervals(self, view_interval, monkeypatch): + # previously broke on reversed xlmits; see GH37454 + class mock_axis: + def get_view_interval(self): + return view_interval + + tdc = converter.TimeSeries_TimedeltaFormatter() + monkeypatch.setattr(tdc, "axis", mock_axis()) + tdc(0.0, 0) + + +@pytest.mark.parametrize("year_span", [11.25, 30, 80, 150, 400, 800, 1500, 2500, 3500]) +# The range is limited to 11.25 at the bottom by if statements in +# the _quarterly_finder() function +def test_quarterly_finder(year_span): + vmin = -1000 + vmax = vmin + year_span * 4 + span = vmax - vmin + 1 + if span < 45: + pytest.skip("the quarterly finder is only invoked if the span is >= 45") + nyears = span / 4 + (min_anndef, maj_anndef) = converter._get_default_annual_spacing(nyears) + result = converter._quarterly_finder(vmin, vmax, to_offset("QE")) + quarters = PeriodIndex( + arrays.PeriodArray(np.array([x[0] for x in result]), dtype="period[Q]") + ) + majors = np.array([x[1] for x 
in result]) + minors = np.array([x[2] for x in result]) + major_quarters = quarters[majors] + minor_quarters = quarters[minors] + check_major_years = major_quarters.year % maj_anndef == 0 + check_minor_years = minor_quarters.year % min_anndef == 0 + check_major_quarters = major_quarters.quarter == 1 + check_minor_quarters = minor_quarters.quarter == 1 + assert np.all(check_major_years) + assert np.all(check_minor_years) + assert np.all(check_major_quarters) + assert np.all(check_minor_quarters) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_datetimelike.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_datetimelike.py new file mode 100644 index 0000000000000000000000000000000000000000..112172656b6ecde6a94282f58a8085751085fc44 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_datetimelike.py @@ -0,0 +1,1754 @@ +""" Test cases for time series specific (freq conversion, etc) """ +from datetime import ( + date, + datetime, + time, + timedelta, +) +import pickle + +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + BaseOffset, + to_offset, +) +from pandas._libs.tslibs.dtypes import freq_to_period_freqstr + +from pandas import ( + DataFrame, + Index, + NaT, + Series, + concat, + isna, + to_datetime, +) +import pandas._testing as tm +from pandas.core.indexes.datetimes import ( + DatetimeIndex, + bdate_range, + date_range, +) +from pandas.core.indexes.period import ( + Period, + PeriodIndex, + period_range, +) +from pandas.core.indexes.timedeltas import timedelta_range +from pandas.tests.plotting.common import _check_ticks_props + +from pandas.tseries.offsets import WeekOfMonth + +mpl = pytest.importorskip("matplotlib") + + +class TestTSPlot: + @pytest.mark.filterwarnings("ignore::UserWarning") + def test_ts_plot_with_tz(self, tz_aware_fixture): + # GH2877, GH17173, GH31205, GH31580 + tz = tz_aware_fixture + index = date_range("1/1/2011", periods=2, freq="h", tz=tz) + ts = Series([188.5, 328.25], index=index) + _check_plot_works(ts.plot) + ax = ts.plot() + xdata = next(iter(ax.get_lines())).get_xdata() + # Check first and last points' labels are correct + assert (xdata[0].hour, xdata[0].minute) == (0, 0) + assert (xdata[-1].hour, xdata[-1].minute) == (1, 0) + + def test_fontsize_set_correctly(self): + # For issue #8765 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 9)), index=range(10) + ) + _, ax = mpl.pyplot.subplots() + df.plot(fontsize=2, ax=ax) + for label in ax.get_xticklabels() + ax.get_yticklabels(): + assert label.get_fontsize() == 2 + + def test_frame_inferred(self): + # inferred freq + idx = date_range("1/1/1987", freq="MS", periods=100) + idx = DatetimeIndex(idx.values, freq=None) + + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + _check_plot_works(df.plot) + + # axes freq + idx = idx[0:40].union(idx[45:99]) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + _check_plot_works(df2.plot) + + def test_frame_inferred_n_gt_1(self): + # N > 1 + idx = date_range("2008-1-1 00:15:00", freq="15min", periods=10) + idx = DatetimeIndex(idx.values, freq=None) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), index=idx + ) + _check_plot_works(df.plot) + + def test_is_error_nozeroindex(self): + # GH11858 + i = np.array([1, 2, 3]) + a = DataFrame(i, index=i) + _check_plot_works(a.plot, xerr=a) + _check_plot_works(a.plot, yerr=a) + + def 
test_nonnumeric_exclude(self): + idx = date_range("1/1/1987", freq="YE", periods=3) + df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx) + + fig, ax = mpl.pyplot.subplots() + df.plot(ax=ax) # it works + assert len(ax.get_lines()) == 1 # B was plotted + mpl.pyplot.close(fig) + + def test_nonnumeric_exclude_error(self): + idx = date_range("1/1/1987", freq="YE", periods=3) + df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}, idx) + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df["A"].plot() + + @pytest.mark.parametrize("freq", ["s", "min", "h", "D", "W", "M", "Q", "Y"]) + def test_tsplot_period(self, freq): + idx = period_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _, ax = mpl.pyplot.subplots() + _check_plot_works(ser.plot, ax=ax) + + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_tsplot_datetime(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _, ax = mpl.pyplot.subplots() + _check_plot_works(ser.plot, ax=ax) + + def test_tsplot(self): + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + _, ax = mpl.pyplot.subplots() + ts.plot(style="k", ax=ax) + color = (0.0, 0.0, 0.0, 1) + assert color == ax.get_lines()[0].get_color() + + def test_both_style_and_color(self): + ts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + msg = ( + "Cannot pass 'style' string with a color symbol and 'color' " + "keyword argument. Please use one or the other or pass 'style' " + "without a color symbol" + ) + with pytest.raises(ValueError, match=msg): + ts.plot(style="b-", color="#000099") + + s = ts.reset_index(drop=True) + with pytest.raises(ValueError, match=msg): + s.plot(style="b-", color="#000099") + + @pytest.mark.parametrize("freq", ["ms", "us"]) + def test_high_freq(self, freq): + _, ax = mpl.pyplot.subplots() + rng = date_range("1/1/2012", periods=100, freq=freq) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _check_plot_works(ser.plot, ax=ax) + + def test_get_datevalue(self): + from pandas.plotting._matplotlib.converter import get_datevalue + + assert get_datevalue(None, "D") is None + assert get_datevalue(1987, "Y") == 1987 + assert get_datevalue(Period(1987, "Y"), "M") == Period("1987-12", "M").ordinal + assert get_datevalue("1/1/1987", "D") == Period("1987-1-1", "D").ordinal + + def test_ts_plot_format_coord(self): + def check_format_of_first_point(ax, expected_string): + first_line = ax.get_lines()[0] + first_x = first_line.get_xdata()[0].ordinal + first_y = first_line.get_ydata()[0] + assert expected_string == ax.format_coord(first_x, first_y) + + annual = Series(1, index=date_range("2014-01-01", periods=3, freq="YE-DEC")) + _, ax = mpl.pyplot.subplots() + annual.plot(ax=ax) + check_format_of_first_point(ax, "t = 2014 y = 1.000000") + + # note this is added to the annual plot already in existence, and + # changes its freq field + daily = Series(1, index=date_range("2014-01-01", periods=3, freq="D")) + daily.plot(ax=ax) + check_format_of_first_point(ax, "t = 2014-01-01 y = 1.000000") + + @pytest.mark.parametrize("freq", ["s", "min", "h", "D", "W", "M", "Q", "Y"]) + def test_line_plot_period_series(self, freq): + idx = period_range("12/31/1999", freq=freq, periods=100) + ser = 
Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _check_plot_works(ser.plot, ser.index.freq) + + @pytest.mark.parametrize( + "frqncy", ["1s", "3s", "5min", "7h", "4D", "8W", "11M", "3Y"] + ) + def test_line_plot_period_mlt_series(self, frqncy): + # test period index line plot for series with multiples (`mlt`) of the + # frequency (`frqncy`) rule code. tests resolution of issue #14763 + idx = period_range("12/31/1999", freq=frqncy, periods=100) + s = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _check_plot_works(s.plot, s.index.freq.rule_code) + + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_line_plot_datetime_series(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + _check_plot_works(ser.plot, ser.index.freq.rule_code) + + @pytest.mark.parametrize("freq", ["s", "min", "h", "D", "W", "ME", "QE", "YE"]) + def test_line_plot_period_frame(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + index=idx, + columns=["A", "B", "C"], + ) + _check_plot_works(df.plot, df.index.freq) + + @pytest.mark.parametrize( + "frqncy", ["1s", "3s", "5min", "7h", "4D", "8W", "11M", "3Y"] + ) + def test_line_plot_period_mlt_frame(self, frqncy): + # test period index line plot for DataFrames with multiples (`mlt`) + # of the frequency (`frqncy`) rule code. tests resolution of issue + # #14763 + idx = period_range("12/31/1999", freq=frqncy, periods=100) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + index=idx, + columns=["A", "B", "C"], + ) + freq = freq_to_period_freqstr(1, df.index.freq.rule_code) + freq = df.index.asfreq(freq).freq + _check_plot_works(df.plot, freq) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_line_plot_datetime_frame(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + index=idx, + columns=["A", "B", "C"], + ) + freq = freq_to_period_freqstr(1, df.index.freq.rule_code) + freq = df.index.to_period(freq).freq + _check_plot_works(df.plot, freq) + + @pytest.mark.parametrize( + "freq", ["s", "min", "h", "D", "W", "ME", "QE-DEC", "YE", "1B30Min"] + ) + def test_line_plot_inferred_freq(self, freq): + idx = date_range("12/31/1999", freq=freq, periods=100) + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + ser = Series(ser.values, Index(np.asarray(ser.index))) + _check_plot_works(ser.plot, ser.index.inferred_freq) + + ser = ser.iloc[[0, 3, 5, 6]] + _check_plot_works(ser.plot) + + def test_fake_inferred_business(self): + _, ax = mpl.pyplot.subplots() + rng = date_range("2001-1-1", "2001-1-10") + ts = Series(range(len(rng)), index=rng) + ts = concat([ts[:3], ts[5:]]) + ts.plot(ax=ax) + assert not hasattr(ax, "freq") + + def test_plot_offset_freq(self): + ser = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + _check_plot_works(ser.plot) + + def test_plot_offset_freq_business(self): + dr = date_range("2023-01-01", freq="BQS", periods=10) + ser = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr) + _check_plot_works(ser.plot) + + def 
test_plot_multiple_inferred_freq(self): + dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(2000, 1, 11)]) + ser = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr) + _check_plot_works(ser.plot) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_uhf(self): + import pandas.plotting._matplotlib.converter as conv + + idx = date_range("2012-6-22 21:59:51.960928", freq="ms", periods=500) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 2)), index=idx + ) + + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + axis = ax.get_xaxis() + + tlocs = axis.get_ticklocs() + tlabels = axis.get_ticklabels() + for loc, label in zip(tlocs, tlabels): + xp = conv._from_ordinal(loc).strftime("%H:%M:%S.%f") + rs = str(label.get_text()) + if len(rs): + assert xp == rs + + def test_irreg_hf(self): + idx = date_range("2012-6-22 21:59:51", freq="s", periods=10) + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 2)), index=idx + ) + + irreg = df.iloc[[0, 1, 3, 4]] + _, ax = mpl.pyplot.subplots() + irreg.plot(ax=ax) + diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff() + + sec = 1.0 / 24 / 60 / 60 + assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all() + + def test_irreg_hf_object(self): + idx = date_range("2012-6-22 21:59:51", freq="s", periods=10) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 2)), index=idx + ) + _, ax = mpl.pyplot.subplots() + df2.index = df2.index.astype(object) + df2.plot(ax=ax) + diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff() + sec = 1.0 / 24 / 60 / 60 + assert (np.fabs(diffs[1:] - sec) < 1e-8).all() + + def test_irregular_datetime64_repr_bug(self): + ser = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ) + ser = ser.iloc[[0, 1, 2, 7]] + + _, ax = mpl.pyplot.subplots() + + ret = ser.plot(ax=ax) + assert ret is not None + + for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index): + assert rs == xp + + def test_business_freq(self): + bts = Series(range(5), period_range("2020-01-01", periods=5)) + msg = r"PeriodDtype\[B\] is deprecated" + dt = bts.index[0].to_timestamp() + with tm.assert_produces_warning(FutureWarning, match=msg): + bts.index = period_range(start=dt, periods=len(bts), freq="B") + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal + idx = ax.get_lines()[0].get_xdata() + with tm.assert_produces_warning(FutureWarning, match=msg): + assert PeriodIndex(data=idx).freqstr == "B" + + def test_business_freq_convert(self): + bts = Series( + np.arange(300, dtype=np.float64), + index=date_range("2020-01-01", periods=300, freq="B"), + ).asfreq("BME") + ts = bts.to_period("M") + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal + idx = ax.get_lines()[0].get_xdata() + assert PeriodIndex(data=idx).freqstr == "M" + + def test_freq_with_no_period_alias(self): + # GH34487 + freq = WeekOfMonth() + bts = Series( + np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10) + ).asfreq(freq) + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + + idx = ax.get_lines()[0].get_xdata() + msg = "freq not specified and cannot be inferred" + with pytest.raises(ValueError, match=msg): + PeriodIndex(data=idx) + + def test_nonzero_base(self): + # GH2571 + idx = date_range("2012-12-20", periods=24, freq="h") + timedelta(minutes=30) + df = DataFrame(np.arange(24), index=idx) + _, ax = 
mpl.pyplot.subplots() + df.plot(ax=ax) + rs = ax.get_lines()[0].get_xdata() + assert not Index(rs).is_normalized + + def test_dataframe(self): + bts = DataFrame( + { + "a": Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ) + } + ) + _, ax = mpl.pyplot.subplots() + bts.plot(ax=ax) + idx = ax.get_lines()[0].get_xdata() + tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx)) + + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + @pytest.mark.parametrize( + "obj", + [ + Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ), + DataFrame( + { + "a": Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ), + "b": Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + ) + + 1, + } + ), + ], + ) + def test_axis_limits(self, obj): + _, ax = mpl.pyplot.subplots() + obj.plot(ax=ax) + xlim = ax.get_xlim() + ax.set_xlim(xlim[0] - 5, xlim[1] + 10) + result = ax.get_xlim() + assert result[0] == xlim[0] - 5 + assert result[1] == xlim[1] + 10 + + # string + expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq)) + ax.set_xlim("1/1/2000", "4/1/2000") + result = ax.get_xlim() + assert int(result[0]) == expected[0].ordinal + assert int(result[1]) == expected[1].ordinal + + # datetime + expected = (Period("1/1/2000", ax.freq), Period("4/1/2000", ax.freq)) + ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1)) + result = ax.get_xlim() + assert int(result[0]) == expected[0].ordinal + assert int(result[1]) == expected[1].ordinal + fig = ax.get_figure() + mpl.pyplot.close(fig) + + def test_get_finder(self): + import pandas.plotting._matplotlib.converter as conv + + assert conv.get_finder(to_offset("B")) == conv._daily_finder + assert conv.get_finder(to_offset("D")) == conv._daily_finder + assert conv.get_finder(to_offset("ME")) == conv._monthly_finder + assert conv.get_finder(to_offset("QE")) == conv._quarterly_finder + assert conv.get_finder(to_offset("YE")) == conv._annual_finder + assert conv.get_finder(to_offset("W")) == conv._daily_finder + + def test_finder_daily(self): + day_lst = [10, 40, 252, 400, 950, 2750, 10000] + + msg = "Period with BDay freq is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + xpl1 = xpl2 = [Period("1999-1-1", freq="B").ordinal] * len(day_lst) + rs1 = [] + rs2 = [] + for n in day_lst: + rng = bdate_range("1999-1-1", periods=n) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs1.append(xaxis.get_majorticklocs()[0]) + + vmin, vmax = ax.get_xlim() + ax.set_xlim(vmin + 0.9, vmax) + rs2.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs1 == xpl1 + assert rs2 == xpl2 + + def test_finder_quarterly(self): + yrs = [3.5, 11] + + xpl1 = xpl2 = [Period("1988Q1").ordinal] * len(yrs) + rs1 = [] + rs2 = [] + for n in yrs: + rng = period_range("1987Q2", periods=int(n * 4), freq="Q") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs1.append(xaxis.get_majorticklocs()[0]) + + (vmin, vmax) = ax.get_xlim() + ax.set_xlim(vmin + 0.9, vmax) + rs2.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs1 == xpl1 + assert rs2 == xpl2 + + def test_finder_monthly(self): + yrs = [1.15, 2.5, 4, 11] + + 
xpl1 = xpl2 = [Period("Jan 1988").ordinal] * len(yrs) + rs1 = [] + rs2 = [] + for n in yrs: + rng = period_range("1987Q2", periods=int(n * 12), freq="M") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs1.append(xaxis.get_majorticklocs()[0]) + + vmin, vmax = ax.get_xlim() + ax.set_xlim(vmin + 0.9, vmax) + rs2.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs1 == xpl1 + assert rs2 == xpl2 + + def test_finder_monthly_long(self): + rng = period_range("1988Q1", periods=24 * 12, freq="M") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs = xaxis.get_majorticklocs()[0] + xp = Period("1989Q1", "M").ordinal + assert rs == xp + + def test_finder_annual(self): + xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170] + xp = [Period(x, freq="Y").ordinal for x in xp] + rs = [] + for nyears in [5, 10, 19, 49, 99, 199, 599, 1001]: + rng = period_range("1987", periods=nyears, freq="Y") + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs.append(xaxis.get_majorticklocs()[0]) + mpl.pyplot.close(ax.get_figure()) + + assert rs == xp + + @pytest.mark.slow + def test_finder_minutely(self): + nminutes = 50 * 24 * 60 + rng = date_range("1/1/1999", freq="Min", periods=nminutes) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs = xaxis.get_majorticklocs()[0] + xp = Period("1/1/1999", freq="Min").ordinal + + assert rs == xp + + def test_finder_hourly(self): + nhours = 23 + rng = date_range("1/1/1999", freq="h", periods=nhours) + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + xaxis = ax.get_xaxis() + rs = xaxis.get_majorticklocs()[0] + xp = Period("1/1/1999", freq="h").ordinal + + assert rs == xp + + def test_gaps(self): + ts = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + ts.iloc[5:25] = np.nan + _, ax = mpl.pyplot.subplots() + ts.plot(ax=ax) + lines = ax.get_lines() + assert len(lines) == 1 + line = lines[0] + data = line.get_xydata() + + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[5:25, 1].all() + mpl.pyplot.close(ax.get_figure()) + + def test_gaps_irregular(self): + # irregular + ts = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + ts = ts.iloc[[0, 1, 2, 5, 7, 9, 12, 15, 20]] + ts.iloc[2:5] = np.nan + _, ax = mpl.pyplot.subplots() + ax = ts.plot(ax=ax) + lines = ax.get_lines() + assert len(lines) == 1 + line = lines[0] + data = line.get_xydata() + + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[2:5, 1].all() + mpl.pyplot.close(ax.get_figure()) + + def test_gaps_non_ts(self): + # non-ts + idx = [0, 1, 2, 5, 7, 9, 12, 15, 20] + ser = Series(np.random.default_rng(2).standard_normal(len(idx)), idx) + ser.iloc[2:5] = np.nan + _, ax = mpl.pyplot.subplots() + ser.plot(ax=ax) + lines = ax.get_lines() + assert len(lines) == 1 + line = lines[0] + data = line.get_xydata() + data = np.ma.MaskedArray(data, 
mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[2:5, 1].all() + + def test_gap_upsample(self): + low = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + low.iloc[5:25] = np.nan + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + + idxh = date_range(low.index[0], low.index[-1], freq="12h") + s = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + s.plot(secondary_y=True) + lines = ax.get_lines() + assert len(lines) == 1 + assert len(ax.right_ax.get_lines()) == 1 + + line = lines[0] + data = line.get_xydata() + data = np.ma.MaskedArray(data, mask=isna(data), fill_value=np.nan) + + assert isinstance(data, np.ma.core.MaskedArray) + mask = data.mask + assert mask[5:25, 1].all() + + def test_secondary_y(self): + ser = Series(np.random.default_rng(2).standard_normal(10)) + fig, _ = mpl.pyplot.subplots() + ax = ser.plot(secondary_y=True) + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + axes = fig.get_axes() + line = ax.get_lines()[0] + xp = Series(line.get_ydata(), line.get_xdata()) + tm.assert_series_equal(ser, xp) + assert ax.get_yaxis().get_ticks_position() == "right" + assert not axes[0].get_yaxis().get_visible() + mpl.pyplot.close(fig) + + def test_secondary_y_yaxis(self): + Series(np.random.default_rng(2).standard_normal(10)) + ser2 = Series(np.random.default_rng(2).standard_normal(10)) + _, ax2 = mpl.pyplot.subplots() + ser2.plot(ax=ax2) + assert ax2.get_yaxis().get_ticks_position() == "left" + mpl.pyplot.close(ax2.get_figure()) + + def test_secondary_both(self): + ser = Series(np.random.default_rng(2).standard_normal(10)) + ser2 = Series(np.random.default_rng(2).standard_normal(10)) + ax = ser2.plot() + ax2 = ser.plot(secondary_y=True) + assert ax.get_yaxis().get_visible() + assert not hasattr(ax, "left_ax") + assert hasattr(ax, "right_ax") + assert hasattr(ax2, "left_ax") + assert not hasattr(ax2, "right_ax") + + def test_secondary_y_ts(self): + idx = date_range("1/1/2000", periods=10) + ser = Series(np.random.default_rng(2).standard_normal(10), idx) + fig, _ = mpl.pyplot.subplots() + ax = ser.plot(secondary_y=True) + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + axes = fig.get_axes() + line = ax.get_lines()[0] + xp = Series(line.get_ydata(), line.get_xdata()).to_timestamp() + tm.assert_series_equal(ser, xp) + assert ax.get_yaxis().get_ticks_position() == "right" + assert not axes[0].get_yaxis().get_visible() + mpl.pyplot.close(fig) + + def test_secondary_y_ts_yaxis(self): + idx = date_range("1/1/2000", periods=10) + ser2 = Series(np.random.default_rng(2).standard_normal(10), idx) + _, ax2 = mpl.pyplot.subplots() + ser2.plot(ax=ax2) + assert ax2.get_yaxis().get_ticks_position() == "left" + mpl.pyplot.close(ax2.get_figure()) + + def test_secondary_y_ts_visible(self): + idx = date_range("1/1/2000", periods=10) + ser2 = Series(np.random.default_rng(2).standard_normal(10), idx) + ax = ser2.plot() + assert ax.get_yaxis().get_visible() + + def test_secondary_kde(self): + pytest.importorskip("scipy") + ser = Series(np.random.default_rng(2).standard_normal(10)) + fig, ax = mpl.pyplot.subplots() + ax = ser.plot(secondary_y=True, kind="density", ax=ax) + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + axes = fig.get_axes() + assert axes[1].get_yaxis().get_ticks_position() == "right" + + def test_secondary_bar(self): + ser = Series(np.random.default_rng(2).standard_normal(10)) + fig, ax = 
mpl.pyplot.subplots() + ser.plot(secondary_y=True, kind="bar", ax=ax) + axes = fig.get_axes() + assert axes[1].get_yaxis().get_ticks_position() == "right" + + def test_secondary_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=["a", "b", "c"] + ) + axes = df.plot(secondary_y=["a", "c"], subplots=True) + assert axes[0].get_yaxis().get_ticks_position() == "right" + assert axes[1].get_yaxis().get_ticks_position() == "left" + assert axes[2].get_yaxis().get_ticks_position() == "right" + + def test_secondary_bar_frame(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 3)), columns=["a", "b", "c"] + ) + axes = df.plot(kind="bar", secondary_y=["a", "c"], subplots=True) + assert axes[0].get_yaxis().get_ticks_position() == "right" + assert axes[1].get_yaxis().get_ticks_position() == "left" + assert axes[2].get_yaxis().get_ticks_position() == "right" + + def test_mixed_freq_regular_first(self): + # TODO + s1 = Series( + np.arange(20, dtype=np.float64), + index=date_range("2020-01-01", periods=20, freq="B"), + ) + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15]] + + # it works! + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + + ax2 = s2.plot(style="g", ax=ax) + lines = ax2.get_lines() + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + idx1 = PeriodIndex(lines[0].get_xdata()) + idx2 = PeriodIndex(lines[1].get_xdata()) + + tm.assert_index_equal(idx1, s1.index.to_period("B")) + tm.assert_index_equal(idx2, s2.index.to_period("B")) + + left, right = ax2.get_xlim() + pidx = s1.index.to_period() + assert left <= pidx[0].ordinal + assert right >= pidx[-1].ordinal + + def test_mixed_freq_irregular_first(self): + s1 = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15]] + _, ax = mpl.pyplot.subplots() + s2.plot(style="g", ax=ax) + s1.plot(ax=ax) + assert not hasattr(ax, "freq") + lines = ax.get_lines() + x1 = lines[0].get_xdata() + tm.assert_numpy_array_equal(x1, s2.index.astype(object).values) + x2 = lines[1].get_xdata() + tm.assert_numpy_array_equal(x2, s1.index.astype(object).values) + + def test_mixed_freq_regular_first_df(self): + # GH 9852 + s1 = Series( + np.arange(20, dtype=np.float64), + index=date_range("2020-01-01", periods=20, freq="B"), + ).to_frame() + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + ax2 = s2.plot(style="g", ax=ax) + lines = ax2.get_lines() + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + idx1 = PeriodIndex(lines[0].get_xdata()) + idx2 = PeriodIndex(lines[1].get_xdata()) + assert idx1.equals(s1.index.to_period("B")) + assert idx2.equals(s2.index.to_period("B")) + left, right = ax2.get_xlim() + pidx = s1.index.to_period() + assert left <= pidx[0].ordinal + assert right >= pidx[-1].ordinal + + def test_mixed_freq_irregular_first_df(self): + # GH 9852 + s1 = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ).to_frame() + s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :] + _, ax = mpl.pyplot.subplots() + s2.plot(style="g", ax=ax) + s1.plot(ax=ax) + assert not hasattr(ax, "freq") + lines = ax.get_lines() + x1 = lines[0].get_xdata() + tm.assert_numpy_array_equal(x1, s2.index.astype(object).values) + x2 = lines[1].get_xdata() + tm.assert_numpy_array_equal(x2, s1.index.astype(object).values) + + def test_mixed_freq_hf_first(self): + idxh = 
date_range("1/1/1999", periods=365, freq="D") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + low.plot(ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "D" + + def test_mixed_freq_alignment(self): + ts_ind = date_range("2012-01-01 13:00", "2012-01-02", freq="h") + ts_data = np.random.default_rng(2).standard_normal(12) + + ts = Series(ts_data, index=ts_ind) + ts2 = ts.asfreq("min").interpolate() + + _, ax = mpl.pyplot.subplots() + ax = ts.plot(ax=ax) + ts2.plot(style="r", ax=ax) + + assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0] + + def test_mixed_freq_lf_first(self): + idxh = date_range("1/1/1999", periods=365, freq="D") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(legend=True, ax=ax) + high.plot(legend=True, ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "D" + leg = ax.get_legend() + assert len(leg.texts) == 2 + mpl.pyplot.close(ax.get_figure()) + + def test_mixed_freq_lf_first_hourly(self): + idxh = date_range("1/1/1999", periods=240, freq="min") + idxl = date_range("1/1/1999", periods=4, freq="h") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + high.plot(ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "min" + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_mixed_freq_irreg_period(self): + ts = Series( + np.arange(30, dtype=np.float64), index=date_range("2020-01-01", periods=30) + ) + irreg = ts.iloc[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]] + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + rng = period_range("1/3/2000", periods=30, freq="B") + ps = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + irreg.plot(ax=ax) + ps.plot(ax=ax) + + def test_mixed_freq_shared_ax(self): + # GH13341, using sharex=True + idx1 = date_range("2015-01-01", periods=3, freq="ME") + idx2 = idx1[:1].union(idx1[2:]) + s1 = Series(range(len(idx1)), idx1) + s2 = Series(range(len(idx2)), idx2) + + _, (ax1, ax2) = mpl.pyplot.subplots(nrows=2, sharex=True) + s1.plot(ax=ax1) + s2.plot(ax=ax2) + + assert ax1.freq == "M" + assert ax2.freq == "M" + assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] + + def test_mixed_freq_shared_ax_twin_x(self): + # GH13341, using sharex=True + idx1 = date_range("2015-01-01", periods=3, freq="ME") + idx2 = idx1[:1].union(idx1[2:]) + s1 = Series(range(len(idx1)), idx1) + s2 = Series(range(len(idx2)), idx2) + # using twinx + _, ax1 = mpl.pyplot.subplots() + ax2 = ax1.twinx() + s1.plot(ax=ax1) + s2.plot(ax=ax2) + + assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] + + @pytest.mark.xfail(reason="TODO (GH14330, GH14322)") + def test_mixed_freq_shared_ax_twin_x_irregular_first(self): + # GH13341, using sharex=True + idx1 = date_range("2015-01-01", periods=3, freq="M") + idx2 = 
idx1[:1].union(idx1[2:]) + s1 = Series(range(len(idx1)), idx1) + s2 = Series(range(len(idx2)), idx2) + _, ax1 = mpl.pyplot.subplots() + ax2 = ax1.twinx() + s2.plot(ax=ax1) + s1.plot(ax=ax2) + assert ax1.lines[0].get_xydata()[0, 0] == ax2.lines[0].get_xydata()[0, 0] + + def test_nat_handling(self): + _, ax = mpl.pyplot.subplots() + + dti = DatetimeIndex(["2015-01-01", NaT, "2015-01-03"]) + s = Series(range(len(dti)), dti) + s.plot(ax=ax) + xdata = ax.get_lines()[0].get_xdata() + # plot x data is bounded by index values + assert s.index.min() <= Series(xdata).min() + assert Series(xdata).max() <= s.index.max() + + def test_to_weekly_resampling_disallow_how_kwd(self): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + + msg = ( + "'how' is not a valid keyword for plotting functions. If plotting " + "multiple objects on shared axes, resample manually first." + ) + with pytest.raises(ValueError, match=msg): + low.plot(ax=ax, how="foo") + + def test_to_weekly_resampling(self): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + low.plot(ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + + def test_from_weekly_resampling(self): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + high.plot(ax=ax) + + expected_h = idxh.to_period().asi8.astype(np.float64) + expected_l = np.array( + [1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544, 1549, 1553, 1558, 1562], + dtype=np.float64, + ) + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + xdata = line.get_xdata(orig=False) + if len(xdata) == 12: # idxl lines + tm.assert_numpy_array_equal(xdata, expected_l) + else: + tm.assert_numpy_array_equal(xdata, expected_h) + + @pytest.mark.parametrize("kind1, kind2", [("line", "area"), ("area", "line")]) + def test_from_resampling_area_line_mixed(self, kind1, kind2): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = DataFrame( + np.random.default_rng(2).random((len(idxh), 3)), + index=idxh, + columns=[0, 1, 2], + ) + low = DataFrame( + np.random.default_rng(2).random((len(idxl), 3)), + index=idxl, + columns=[0, 1, 2], + ) + + _, ax = mpl.pyplot.subplots() + low.plot(kind=kind1, stacked=True, ax=ax) + high.plot(kind=kind2, stacked=True, ax=ax) + + # check low dataframe result + expected_x = np.array( + [ + 1514, + 1519, + 1523, + 1527, + 1531, + 1536, + 1540, + 1544, + 1549, + 1553, + 1558, + 1562, + ], + dtype=np.float64, + ) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + line = ax.lines[i] + assert PeriodIndex(line.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) + # check stacked values are correct + expected_y += 
low[i].values + tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) + + # check high dataframe result + expected_x = idxh.to_period().asi8.astype(np.float64) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + line = ax.lines[3 + i] + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) + expected_y += high[i].values + tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) + + @pytest.mark.parametrize("kind1, kind2", [("line", "area"), ("area", "line")]) + def test_from_resampling_area_line_mixed_high_to_low(self, kind1, kind2): + idxh = date_range("1/1/1999", periods=52, freq="W") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = DataFrame( + np.random.default_rng(2).random((len(idxh), 3)), + index=idxh, + columns=[0, 1, 2], + ) + low = DataFrame( + np.random.default_rng(2).random((len(idxl), 3)), + index=idxl, + columns=[0, 1, 2], + ) + _, ax = mpl.pyplot.subplots() + high.plot(kind=kind1, stacked=True, ax=ax) + low.plot(kind=kind2, stacked=True, ax=ax) + + # check high dataframe result + expected_x = idxh.to_period().asi8.astype(np.float64) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + line = ax.lines[i] + assert PeriodIndex(data=line.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(line.get_xdata(orig=False), expected_x) + expected_y += high[i].values + tm.assert_numpy_array_equal(line.get_ydata(orig=False), expected_y) + + # check low dataframe result + expected_x = np.array( + [ + 1514, + 1519, + 1523, + 1527, + 1531, + 1536, + 1540, + 1544, + 1549, + 1553, + 1558, + 1562, + ], + dtype=np.float64, + ) + expected_y = np.zeros(len(expected_x), dtype=np.float64) + for i in range(3): + lines = ax.lines[3 + i] + assert PeriodIndex(data=lines.get_xdata()).freq == idxh.freq + tm.assert_numpy_array_equal(lines.get_xdata(orig=False), expected_x) + expected_y += low[i].values + tm.assert_numpy_array_equal(lines.get_ydata(orig=False), expected_y) + + def test_mixed_freq_second_millisecond(self): + # GH 7772, GH 7760 + idxh = date_range("2014-07-01 09:00", freq="s", periods=50) + idxl = date_range("2014-07-01 09:00", freq="100ms", periods=500) + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + # high to low + _, ax = mpl.pyplot.subplots() + high.plot(ax=ax) + low.plot(ax=ax) + assert len(ax.get_lines()) == 2 + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "ms" + + def test_mixed_freq_second_millisecond_low_to_high(self): + # GH 7772, GH 7760 + idxh = date_range("2014-07-01 09:00", freq="s", periods=50) + idxl = date_range("2014-07-01 09:00", freq="100ms", periods=500) + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + # low to high + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + high.plot(ax=ax) + assert len(ax.get_lines()) == 2 + for line in ax.get_lines(): + assert PeriodIndex(data=line.get_xdata()).freq == "ms" + + def test_irreg_dtypes(self): + # date + idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)] + df = DataFrame( + np.random.default_rng(2).standard_normal((len(idx), 3)), + Index(idx, dtype=object), + ) + _check_plot_works(df.plot) + + def test_irreg_dtypes_dt64(self): + # np.datetime64 + idx = date_range("1/1/2000", periods=10) + idx = idx[[0, 
2, 5, 9]].astype(object) + df = DataFrame(np.random.default_rng(2).standard_normal((len(idx), 3)), idx) + _, ax = mpl.pyplot.subplots() + _check_plot_works(df.plot, ax=ax) + + def test_time(self): + t = datetime(1, 1, 1, 3, 30, 0) + deltas = np.random.default_rng(2).integers(1, 20, 3).cumsum() + ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas]) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(len(ts)), + "b": np.random.default_rng(2).standard_normal(len(ts)), + }, + index=ts, + ) + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + + # verify tick labels + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if s != 0: + xp = time(h, m, s).strftime("%H:%M:%S") + else: + xp = time(h, m, s).strftime("%H:%M") + assert xp == rs + + def test_time_change_xlim(self): + t = datetime(1, 1, 1, 3, 30, 0) + deltas = np.random.default_rng(2).integers(1, 20, 3).cumsum() + ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas]) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(len(ts)), + "b": np.random.default_rng(2).standard_normal(len(ts)), + }, + index=ts, + ) + _, ax = mpl.pyplot.subplots() + df.plot(ax=ax) + + # verify tick labels + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if s != 0: + xp = time(h, m, s).strftime("%H:%M:%S") + else: + xp = time(h, m, s).strftime("%H:%M") + assert xp == rs + + # change xlim + ax.set_xlim("1:30", "5:00") + + # check tick labels again + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if s != 0: + xp = time(h, m, s).strftime("%H:%M:%S") + else: + xp = time(h, m, s).strftime("%H:%M") + assert xp == rs + + def test_time_musec(self): + t = datetime(1, 1, 1, 3, 30, 0) + deltas = np.random.default_rng(2).integers(1, 20, 3).cumsum() + ts = np.array([(t + timedelta(microseconds=int(x))).time() for x in deltas]) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(len(ts)), + "b": np.random.default_rng(2).standard_normal(len(ts)), + }, + index=ts, + ) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + + # verify tick labels + ticks = ax.get_xticks() + labels = ax.get_xticklabels() + for _tick, _label in zip(ticks, labels): + m, s = divmod(int(_tick), 60) + + us = round((_tick - int(_tick)) * 1e6) + + h, m = divmod(m, 60) + rs = _label.get_text() + if len(rs) > 0: + if (us % 1000) != 0: + xp = time(h, m, s, us).strftime("%H:%M:%S.%f") + elif (us // 1000) != 0: + xp = time(h, m, s, us).strftime("%H:%M:%S.%f")[:-3] + elif s != 0: + xp = time(h, m, s, us).strftime("%H:%M:%S") + else: + xp = time(h, m, s, us).strftime("%H:%M") + assert xp == rs + + def test_secondary_upsample(self): + idxh = date_range("1/1/1999", periods=365, freq="D") + idxl = date_range("1/1/1999", periods=12, freq="ME") + high = Series(np.random.default_rng(2).standard_normal(len(idxh)), idxh) + low = Series(np.random.default_rng(2).standard_normal(len(idxl)), idxl) + _, ax = mpl.pyplot.subplots() + low.plot(ax=ax) + ax = high.plot(secondary_y=True, ax=ax) + for line in ax.get_lines(): + assert PeriodIndex(line.get_xdata()).freq == "D" + assert hasattr(ax, "left_ax") 
+ assert not hasattr(ax, "right_ax") + for line in ax.left_ax.get_lines(): + assert PeriodIndex(line.get_xdata()).freq == "D" + + def test_secondary_legend(self): + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + + # ts + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + df.plot(secondary_y=["A", "B"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert leg.get_texts()[0].get_text() == "A (right)" + assert leg.get_texts()[1].get_text() == "B (right)" + assert leg.get_texts()[2].get_text() == "C" + assert leg.get_texts()[3].get_text() == "D" + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + mpl.pyplot.close(fig) + + def test_secondary_legend_right(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + df.plot(secondary_y=["A", "C"], mark_right=False, ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert leg.get_texts()[0].get_text() == "A" + assert leg.get_texts()[1].get_text() == "B" + assert leg.get_texts()[2].get_text() == "C" + assert leg.get_texts()[3].get_text() == "D" + mpl.pyplot.close(fig) + + def test_secondary_legend_bar(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig, ax = mpl.pyplot.subplots() + df.plot(kind="bar", secondary_y=["A"], ax=ax) + leg = ax.get_legend() + assert leg.get_texts()[0].get_text() == "A (right)" + assert leg.get_texts()[1].get_text() == "B" + mpl.pyplot.close(fig) + + def test_secondary_legend_bar_right(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig, ax = mpl.pyplot.subplots() + df.plot(kind="bar", secondary_y=["A"], mark_right=False, ax=ax) + leg = ax.get_legend() + assert leg.get_texts()[0].get_text() == "A" + assert leg.get_texts()[1].get_text() == "B" + mpl.pyplot.close(fig) + + def test_secondary_legend_multi_col(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + ax = df.plot(secondary_y=["C", "D"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + mpl.pyplot.close(fig) + + def test_secondary_legend_nonts(self): + # non-ts + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + ax = df.plot(secondary_y=["A", "B"], ax=ax) + leg = ax.get_legend() + assert 
len(leg.get_lines()) == 4 + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + mpl.pyplot.close() + + def test_secondary_legend_nonts_multi_col(self): + # non-ts + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD"), dtype=object), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + fig = mpl.pyplot.figure() + ax = fig.add_subplot(211) + ax = df.plot(secondary_y=["C", "D"], ax=ax) + leg = ax.get_legend() + assert len(leg.get_lines()) == 4 + assert ax.right_ax.get_legend() is None + colors = set() + for line in leg.get_lines(): + colors.add(line.get_color()) + + # TODO: color cycle problems + assert len(colors) == 4 + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_format_date_axis(self): + rng = date_range("1/1/2012", periods=12, freq="ME") + df = DataFrame(np.random.default_rng(2).standard_normal((len(rng), 3)), rng) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + xaxis = ax.get_xaxis() + for line in xaxis.get_ticklabels(): + if len(line.get_text()) > 0: + assert line.get_rotation() == 30 + + def test_ax_plot(self): + x = date_range(start="2012-01-02", periods=10, freq="D") + y = list(range(len(x))) + _, ax = mpl.pyplot.subplots() + lines = ax.plot(x, y, label="Y") + tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x) + + def test_mpl_nopandas(self): + dates = [date(2008, 12, 31), date(2009, 1, 31)] + values1 = np.arange(10.0, 11.0, 0.5) + values2 = np.arange(11.0, 12.0, 0.5) + + kw = {"fmt": "-", "lw": 4} + + _, ax = mpl.pyplot.subplots() + ax.plot_date([x.toordinal() for x in dates], values1, **kw) + ax.plot_date([x.toordinal() for x in dates], values2, **kw) + + line1, line2 = ax.get_lines() + + exp = np.array([x.toordinal() for x in dates], dtype=np.float64) + tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp) + exp = np.array([x.toordinal() for x in dates], dtype=np.float64) + tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp) + + def test_irregular_ts_shared_ax_xlim(self): + # GH 2960 + from pandas.plotting._matplotlib.converter import DatetimeConverter + + ts = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + ts_irregular = ts.iloc[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] + + # plot the left section of the irregular series, then the right section + _, ax = mpl.pyplot.subplots() + ts_irregular[:5].plot(ax=ax) + ts_irregular[5:].plot(ax=ax) + + # check that axis limits are correct + left, right = ax.get_xlim() + assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) + assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) + + def test_secondary_y_non_ts_xlim(self): + # GH 3490 - non-timeseries with secondary y + index_1 = [1, 2, 3, 4] + index_2 = [5, 6, 7, 8] + s1 = Series(1, index=index_1) + s2 = Series(2, index=index_2) + + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + left_before, right_before = ax.get_xlim() + s2.plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + assert left_before >= left_after + assert right_before < right_after + + def test_secondary_y_regular_ts_xlim(self): + # GH 3490 - regular-timeseries with secondary y + index_1 = date_range(start="2000-01-01", periods=4, freq="D") + index_2 = date_range(start="2000-01-05", periods=4, freq="D") + s1 = Series(1, index=index_1) + s2 = Series(2, index=index_2) + + _, ax = 
mpl.pyplot.subplots() + s1.plot(ax=ax) + left_before, right_before = ax.get_xlim() + s2.plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + assert left_before >= left_after + assert right_before < right_after + + def test_secondary_y_mixed_freq_ts_xlim(self): + # GH 3490 - mixed frequency timeseries with secondary y + rng = date_range("2000-01-01", periods=10000, freq="min") + ts = Series(1, index=rng) + + _, ax = mpl.pyplot.subplots() + ts.plot(ax=ax) + left_before, right_before = ax.get_xlim() + ts.resample("D").mean().plot(secondary_y=True, ax=ax) + left_after, right_after = ax.get_xlim() + + # a downsample should not have changed either limit + assert left_before == left_after + assert right_before == right_after + + def test_secondary_y_irregular_ts_xlim(self): + # GH 3490 - irregular-timeseries with secondary y + from pandas.plotting._matplotlib.converter import DatetimeConverter + + ts = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + ts_irregular = ts.iloc[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]] + + _, ax = mpl.pyplot.subplots() + ts_irregular[:5].plot(ax=ax) + # plot higher-x values on secondary axis + ts_irregular[5:].plot(secondary_y=True, ax=ax) + # ensure secondary limits aren't overwritten by plot on primary + ts_irregular[:5].plot(ax=ax) + + left, right = ax.get_xlim() + assert left <= DatetimeConverter.convert(ts_irregular.index.min(), "", ax) + assert right >= DatetimeConverter.convert(ts_irregular.index.max(), "", ax) + + def test_plot_outofbounds_datetime(self): + # 2579 - checking this does not raise + values = [date(1677, 1, 1), date(1677, 1, 2)] + _, ax = mpl.pyplot.subplots() + ax.plot(values) + + values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)] + ax.plot(values) + + def test_format_timedelta_ticks_narrow(self): + expected_labels = [f"00:00:00.0000000{i:0>2d}" for i in np.arange(10)] + + rng = timedelta_range("0", periods=10, freq="ns") + df = DataFrame(np.random.default_rng(2).standard_normal((len(rng), 3)), rng) + _, ax = mpl.pyplot.subplots() + df.plot(fontsize=2, ax=ax) + mpl.pyplot.draw() + labels = ax.get_xticklabels() + + result_labels = [x.get_text() for x in labels] + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels + + def test_format_timedelta_ticks_wide(self): + expected_labels = [ + "00:00:00", + "1 days 03:46:40", + "2 days 07:33:20", + "3 days 11:20:00", + "4 days 15:06:40", + "5 days 18:53:20", + "6 days 22:40:00", + "8 days 02:26:40", + "9 days 06:13:20", + ] + + rng = timedelta_range("0", periods=10, freq="1 d") + df = DataFrame(np.random.default_rng(2).standard_normal((len(rng), 3)), rng) + _, ax = mpl.pyplot.subplots() + ax = df.plot(fontsize=2, ax=ax) + mpl.pyplot.draw() + labels = ax.get_xticklabels() + + result_labels = [x.get_text() for x in labels] + assert len(result_labels) == len(expected_labels) + assert result_labels == expected_labels + + def test_timedelta_plot(self): + # test issue #8711 + s = Series(range(5), timedelta_range("1day", periods=5)) + _, ax = mpl.pyplot.subplots() + _check_plot_works(s.plot, ax=ax) + + def test_timedelta_long_period(self): + # test long period + index = timedelta_range("1 day 2 hr 30 min 10 s", periods=10, freq="1 d") + s = Series(np.random.default_rng(2).standard_normal(len(index)), index) + _, ax = mpl.pyplot.subplots() + _check_plot_works(s.plot, ax=ax) + + def test_timedelta_short_period(self): + # test short period + index = timedelta_range("1 day 2 hr 30 min 10 s", 
periods=10, freq="1 ns") + s = Series(np.random.default_rng(2).standard_normal(len(index)), index) + _, ax = mpl.pyplot.subplots() + _check_plot_works(s.plot, ax=ax) + + def test_hist(self): + # https://github.com/matplotlib/matplotlib/issues/8459 + rng = date_range("1/1/2011", periods=10, freq="h") + x = rng + w1 = np.arange(0, 1, 0.1) + w2 = np.arange(0, 1, 0.1)[::-1] + _, ax = mpl.pyplot.subplots() + ax.hist([x, x], weights=[w1, w2]) + + def test_overlapping_datetime(self): + # GB 6608 + s1 = Series( + [1, 2, 3], + index=[ + datetime(1995, 12, 31), + datetime(2000, 12, 31), + datetime(2005, 12, 31), + ], + ) + s2 = Series( + [1, 2, 3], + index=[ + datetime(1997, 12, 31), + datetime(2003, 12, 31), + datetime(2008, 12, 31), + ], + ) + + # plot first series, then add the second series to those axes, + # then try adding the first series again + _, ax = mpl.pyplot.subplots() + s1.plot(ax=ax) + s2.plot(ax=ax) + s1.plot(ax=ax) + + @pytest.mark.xfail(reason="GH9053 matplotlib does not use ax.xaxis.converter") + def test_add_matplotlib_datetime64(self): + # GH9053 - ensure that a plot with PeriodConverter still understands + # datetime64 data. This still fails because matplotlib overrides the + # ax.xaxis.converter with a DatetimeConverter + s = Series( + np.random.default_rng(2).standard_normal(10), + index=date_range("1970-01-02", periods=10), + ) + ax = s.plot() + with tm.assert_produces_warning(DeprecationWarning): + # multi-dimensional indexing + ax.plot(s.index, s.values, color="g") + l1, l2 = ax.lines + tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata()) + + def test_matplotlib_scatter_datetime64(self): + # https://github.com/matplotlib/matplotlib/issues/11391 + df = DataFrame(np.random.default_rng(2).random((10, 2)), columns=["x", "y"]) + df["time"] = date_range("2018-01-01", periods=10, freq="D") + _, ax = mpl.pyplot.subplots() + ax.scatter(x="time", y="y", data=df) + mpl.pyplot.draw() + label = ax.get_xticklabels()[0] + expected = "2018-01-01" + assert label.get_text() == expected + + def test_check_xticks_rot(self): + # https://github.com/pandas-dev/pandas/issues/29460 + # regular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-03"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + axes = df.plot(x="x", y="y") + _check_ticks_props(axes, xrot=0) + + def test_check_xticks_rot_irregular(self): + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + axes = df.plot(x="x", y="y") + _check_ticks_props(axes, xrot=30) + + def test_check_xticks_rot_use_idx(self): + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + # use timeseries index or not + axes = df.set_index("x").plot(y="y", use_index=True) + _check_ticks_props(axes, xrot=30) + axes = df.set_index("x").plot(y="y", use_index=False) + _check_ticks_props(axes, xrot=0) + + def test_check_xticks_rot_sharex(self): + # irregular time series + x = to_datetime(["2020-05-01", "2020-05-02", "2020-05-04"]) + df = DataFrame({"x": x, "y": [1, 2, 3]}) + # separate subplots + axes = df.plot(x="x", y="y", subplots=True, sharex=True) + _check_ticks_props(axes, xrot=30) + axes = df.plot(x="x", y="y", subplots=True, sharex=False) + _check_ticks_props(axes, xrot=0) + + +def _check_plot_works(f, freq=None, series=None, *args, **kwargs): + import matplotlib.pyplot as plt + + fig = plt.gcf() + + try: + plt.clf() + ax = fig.add_subplot(211) + orig_ax = kwargs.pop("ax", plt.gca()) + 
orig_axfreq = getattr(orig_ax, "freq", None) + + ret = f(*args, **kwargs) + assert ret is not None # do something more intelligent + + ax = kwargs.pop("ax", plt.gca()) + if series is not None: + dfreq = series.index.freq + if isinstance(dfreq, BaseOffset): + dfreq = dfreq.rule_code + if orig_axfreq is None: + assert ax.freq == dfreq + + if freq is not None: + ax_freq = to_offset(ax.freq, is_period=True) + if freq is not None and orig_axfreq is None: + assert ax_freq == freq + + ax = fig.add_subplot(212) + kwargs["ax"] = ax + ret = f(*args, **kwargs) + assert ret is not None # TODO: do something more intelligent + + # GH18439, GH#24088, statsmodels#4772 + with tm.ensure_clean(return_filelike=True) as path: + pickle.dump(fig, path) + finally: + plt.close(fig) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_groupby.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..5ebf93510a61549c838d91ab2e703f9db23fd626 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_groupby.py @@ -0,0 +1,155 @@ +""" Test cases for GroupBy.plot """ + + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_legend_labels, +) + +pytest.importorskip("matplotlib") + + +class TestDataFrameGroupByPlots: + def test_series_groupby_plotting_nominally_works(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + + weight.groupby(gender).plot() + + def test_series_groupby_plotting_nominally_works_hist(self): + n = 10 + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + height.groupby(gender).hist() + + def test_series_groupby_plotting_nominally_works_alpha(self): + n = 10 + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender = np.random.default_rng(2).choice(["male", "female"], size=n) + # Regression test for GH8733 + height.groupby(gender).plot(alpha=0.5) + + def test_plotting_with_float_index_works(self): + # GH 7025 + df = DataFrame( + { + "def": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "val": np.random.default_rng(2).standard_normal(9), + }, + index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0], + ) + + df.groupby("def")["val"].plot() + + def test_plotting_with_float_index_works_apply(self): + # GH 7025 + df = DataFrame( + { + "def": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "val": np.random.default_rng(2).standard_normal(9), + }, + index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0], + ) + df.groupby("def")["val"].apply(lambda x: x.plot()) + + def test_hist_single_row(self): + # GH10214 + bins = np.arange(80, 100 + 2, 1) + df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]}) + df["Mark"].hist(by=df["ByCol"], bins=bins) + + def test_hist_single_row_single_bycol(self): + # GH10214 + bins = np.arange(80, 100 + 2, 1) + df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]}) + df["Mark"].hist(by=df["ByCol"], bins=bins) + + def test_plot_submethod_works(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + df.groupby("z").plot.scatter("x", "y") + + def test_plot_submethod_works_line(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + 
df.groupby("z")["x"].plot.line() + + def test_plot_kwargs(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + + res = df.groupby("z").plot(kind="scatter", x="x", y="y") + # check that a scatter plot is effectively plotted: the axes should + # contain a PathCollection from the scatter plot (GH11805) + assert len(res["a"].collections) == 1 + + def test_plot_kwargs_scatter(self): + df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")}) + res = df.groupby("z").plot.scatter(x="x", y="y") + assert len(res["a"].collections) == 1 + + @pytest.mark.parametrize("column, expected_axes_num", [(None, 2), ("b", 1)]) + def test_groupby_hist_frame_with_legend(self, column, expected_axes_num): + # GH 6279 - DataFrameGroupBy histogram can have a legend + expected_layout = (1, expected_axes_num) + expected_labels = column or [["a"], ["b"]] + + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + for axes in g.hist(legend=True, column=column): + _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) + for ax, expected_label in zip(axes[0], expected_labels): + _check_legend_labels(ax, expected_label) + + @pytest.mark.parametrize("column", [None, "b"]) + def test_groupby_hist_frame_with_legend_raises(self, column): + # GH 6279 - DataFrameGroupBy histogram with legend and label raises + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + g.hist(legend=True, column=column, label="d") + + def test_groupby_hist_series_with_legend(self): + # GH 6279 - SeriesGroupBy histogram can have a legend + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + for ax in g["a"].hist(legend=True): + _check_axes_shape(ax, axes_num=1, layout=(1, 1)) + _check_legend_labels(ax, ["1", "2"]) + + def test_groupby_hist_series_with_legend_raises(self): + # GH 6279 - SeriesGroupBy histogram with legend and label raises + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + g = df.groupby("c") + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + g.hist(legend=True, label="d") diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_hist_method.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_hist_method.py new file mode 100644 index 0000000000000000000000000000000000000000..4d17f87fdc7bc1456a118c84b76c631544572fd4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_hist_method.py @@ -0,0 +1,971 @@ +""" Test cases for .hist method """ +import re + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, + date_range, + to_datetime, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_ax_scales, + _check_axes_shape, + _check_colors, + _check_legend_labels, + _check_patches_all_filled, + _check_plot_works, + _check_text_labels, + _check_ticks_props, + get_x_axis, + get_y_axis, +) + +mpl = 
pytest.importorskip("matplotlib") + + +@pytest.fixture +def ts(): + return Series( + np.arange(30, dtype=np.float64), + index=date_range("2020-01-01", periods=30, freq="B"), + name="ts", + ) + + +class TestSeriesPlots: + @pytest.mark.parametrize("kwargs", [{}, {"grid": False}, {"figsize": (8, 10)}]) + def test_hist_legacy_kwargs(self, ts, kwargs): + _check_plot_works(ts.hist, **kwargs) + + @pytest.mark.parametrize("kwargs", [{}, {"bins": 5}]) + def test_hist_legacy_kwargs_warning(self, ts, kwargs): + # _check_plot_works adds an ax so catch warning. see GH #13188 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(ts.hist, by=ts.index.month, **kwargs) + + def test_hist_legacy_ax(self, ts): + fig, ax = mpl.pyplot.subplots(1, 1) + _check_plot_works(ts.hist, ax=ax, default_axes=True) + + def test_hist_legacy_ax_and_fig(self, ts): + fig, ax = mpl.pyplot.subplots(1, 1) + _check_plot_works(ts.hist, ax=ax, figure=fig, default_axes=True) + + def test_hist_legacy_fig(self, ts): + fig, _ = mpl.pyplot.subplots(1, 1) + _check_plot_works(ts.hist, figure=fig, default_axes=True) + + def test_hist_legacy_multi_ax(self, ts): + fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2) + _check_plot_works(ts.hist, figure=fig, ax=ax1, default_axes=True) + _check_plot_works(ts.hist, figure=fig, ax=ax2, default_axes=True) + + def test_hist_legacy_by_fig_error(self, ts): + fig, _ = mpl.pyplot.subplots(1, 1) + msg = ( + "Cannot pass 'figure' when using the 'by' argument, since a new 'Figure' " + "instance will be created" + ) + with pytest.raises(ValueError, match=msg): + ts.hist(by=ts.index, figure=fig) + + def test_hist_bins_legacy(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + ax = df.hist(bins=2)[0][0] + assert len(ax.patches) == 2 + + def test_hist_layout(self, hist_df): + df = hist_df + msg = "The 'layout' keyword is not supported when 'by' is None" + with pytest.raises(ValueError, match=msg): + df.height.hist(layout=(1, 1)) + + with pytest.raises(ValueError, match=msg): + df.height.hist(layout=[1, 1]) + + @pytest.mark.slow + @pytest.mark.parametrize( + "by, layout, axes_num, res_layout", + [ + ["gender", (2, 1), 2, (2, 1)], + ["gender", (3, -1), 2, (3, 1)], + ["category", (4, 1), 4, (4, 1)], + ["category", (2, -1), 4, (2, 2)], + ["category", (3, -1), 4, (3, 2)], + ["category", (-1, 4), 4, (1, 4)], + ["classroom", (2, 2), 3, (2, 2)], + ], + ) + def test_hist_layout_with_by(self, hist_df, by, layout, axes_num, res_layout): + df = hist_df + + # _check_plot_works adds an `ax` kwarg to the method call + # so we get a warning about an axis being cleared, even + # though we don't explicing pass one, see GH #13188 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.height.hist, by=getattr(df, by), layout=layout) + _check_axes_shape(axes, axes_num=axes_num, layout=res_layout) + + def test_hist_layout_with_by_shape(self, hist_df): + df = hist_df + + axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7)) + _check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7)) + + def test_hist_no_overlap(self): + from matplotlib.pyplot import ( + gcf, + subplot, + ) + + x = Series(np.random.default_rng(2).standard_normal(2)) + y = Series(np.random.default_rng(2).standard_normal(2)) + subplot(121) + x.hist() + subplot(122) + y.hist() + fig = gcf() + axes = fig.axes + assert len(axes) == 2 + + def test_hist_by_no_extra_plots(self, hist_df): + df = hist_df + df.height.hist(by=df.gender) + assert 
len(mpl.pyplot.get_fignums()) == 1 + + def test_plot_fails_when_ax_differs_from_figure(self, ts): + from pylab import figure + + fig1 = figure() + fig2 = figure() + ax1 = fig1.add_subplot(111) + msg = "passed axis not bound to passed figure" + with pytest.raises(AssertionError, match=msg): + ts.hist(ax=ax1, figure=fig2) + + @pytest.mark.parametrize( + "histtype, expected", + [ + ("bar", True), + ("barstacked", True), + ("step", False), + ("stepfilled", True), + ], + ) + def test_histtype_argument(self, histtype, expected): + # GH23992 Verify functioning of histtype argument + ser = Series(np.random.default_rng(2).integers(1, 10)) + ax = ser.hist(histtype=histtype) + _check_patches_all_filled(ax, filled=expected) + + @pytest.mark.parametrize( + "by, expected_axes_num, expected_layout", [(None, 1, (1, 1)), ("b", 2, (1, 2))] + ) + def test_hist_with_legend(self, by, expected_axes_num, expected_layout): + # GH 6279 - Series histogram can have a legend + index = 15 * ["1"] + 15 * ["2"] + s = Series(np.random.default_rng(2).standard_normal(30), index=index, name="a") + s.index.name = "b" + + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works(s.hist, default_axes=True, legend=True, by=by) + _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) + _check_legend_labels(axes, "a") + + @pytest.mark.parametrize("by", [None, "b"]) + def test_hist_with_legend_raises(self, by): + # GH 6279 - Series histogram with legend and label raises + index = 15 * ["1"] + 15 * ["2"] + s = Series(np.random.default_rng(2).standard_normal(30), index=index, name="a") + s.index.name = "b" + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + s.hist(legend=True, by=by, label="c") + + def test_hist_kwargs(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(bins=5, ax=ax) + assert len(ax.patches) == 5 + _check_text_labels(ax.yaxis.get_label(), "Frequency") + + def test_hist_kwargs_horizontal(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(bins=5, ax=ax) + ax = ts.plot.hist(orientation="horizontal", ax=ax) + _check_text_labels(ax.xaxis.get_label(), "Frequency") + + def test_hist_kwargs_align(self, ts): + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(bins=5, ax=ax) + ax = ts.plot.hist(align="left", stacked=True, ax=ax) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_hist_kde(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(logy=True, ax=ax) + _check_ax_scales(ax, yaxis="log") + xlabels = ax.get_xticklabels() + # ticks are values, thus ticklabels are blank + _check_text_labels(xlabels, [""] * len(xlabels)) + ylabels = ax.get_yticklabels() + _check_text_labels(ylabels, [""] * len(ylabels)) + + def test_hist_kde_plot_works(self, ts): + pytest.importorskip("scipy") + _check_plot_works(ts.plot.kde) + + def test_hist_kde_density_works(self, ts): + pytest.importorskip("scipy") + _check_plot_works(ts.plot.density) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + def test_hist_kde_logy(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.kde(logy=True, ax=ax) + _check_ax_scales(ax, yaxis="log") + xlabels = ax.get_xticklabels() + _check_text_labels(xlabels, [""] * len(xlabels)) + ylabels = ax.get_yticklabels() + _check_text_labels(ylabels, [""] * len(ylabels)) + + def test_hist_kde_color_bins(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.hist(logy=True, bins=10, 
color="b", ax=ax) + _check_ax_scales(ax, yaxis="log") + assert len(ax.patches) == 10 + _check_colors(ax.patches, facecolors=["b"] * 10) + + def test_hist_kde_color(self, ts): + pytest.importorskip("scipy") + _, ax = mpl.pyplot.subplots() + ax = ts.plot.kde(logy=True, color="r", ax=ax) + _check_ax_scales(ax, yaxis="log") + lines = ax.get_lines() + assert len(lines) == 1 + _check_colors(lines, ["r"]) + + +class TestDataFramePlots: + @pytest.mark.slow + def test_hist_df_legacy(self, hist_df): + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(hist_df.hist) + + @pytest.mark.slow + def test_hist_df_legacy_layout(self): + # make sure layout is handled + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.hist, grid=False) + _check_axes_shape(axes, axes_num=3, layout=(2, 2)) + assert not axes[1, 1].get_visible() + + _check_plot_works(df[[2]].hist) + + @pytest.mark.slow + def test_hist_df_legacy_layout2(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 1))) + _check_plot_works(df.hist) + + @pytest.mark.slow + def test_hist_df_legacy_layout3(self): + # make sure layout is handled + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + df[5] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.hist, layout=(4, 2)) + _check_axes_shape(axes, axes_num=6, layout=(4, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", [{"sharex": True, "sharey": True}, {"figsize": (8, 10)}, {"bins": 5}] + ) + def test_hist_df_legacy_layout_kwargs(self, kwargs): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + df[5] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + # make sure sharex, sharey is handled + # handle figsize arg + # check bins argument + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + _check_plot_works(df.hist, **kwargs) + + @pytest.mark.slow + def test_hist_df_legacy_layout_labelsize_rot(self, frame_or_series): + # make sure xlabelsize and xrot are handled + obj = frame_or_series(range(10)) + xf, yf = 20, 18 + xrot, yrot = 30, 40 + axes = obj.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + _check_ticks_props(axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + + @pytest.mark.slow + def test_hist_df_legacy_rectangles(self): + from matplotlib.patches import Rectangle + + ser = Series(range(10)) + ax = ser.hist(cumulative=True, bins=4, density=True) + # height of last bin (index 5) must be 1.0 + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + tm.assert_almost_equal(rects[-1].get_height(), 1.0) + + @pytest.mark.slow + def test_hist_df_legacy_scale(self): + ser = Series(range(10)) + ax = ser.hist(log=True) + # scale of y must be 'log' + _check_ax_scales(ax, yaxis="log") + + @pytest.mark.slow + def test_hist_df_legacy_external_error(self): + ser = Series(range(10)) + # propagate attr exception from matplotlib.Axes.hist + with tm.external_error_raised(AttributeError): + ser.hist(foo="bar") + + def 
test_hist_non_numerical_or_datetime_raises(self): + # gh-10444, GH32590 + df = DataFrame( + { + "a": np.random.default_rng(2).random(10), + "b": np.random.default_rng(2).integers(0, 10, 10), + "c": to_datetime( + np.random.default_rng(2).integers( + 1582800000000000000, 1583500000000000000, 10, dtype=np.int64 + ) + ), + "d": to_datetime( + np.random.default_rng(2).integers( + 1582800000000000000, 1583500000000000000, 10, dtype=np.int64 + ), + utc=True, + ), + } + ) + df_o = df.astype(object) + + msg = "hist method requires numerical or datetime columns, nothing to plot." + with pytest.raises(ValueError, match=msg): + df_o.hist() + + @pytest.mark.parametrize( + "layout_test", + ( + {"layout": None, "expected_size": (2, 2)}, # default is 2x2 + {"layout": (2, 2), "expected_size": (2, 2)}, + {"layout": (4, 1), "expected_size": (4, 1)}, + {"layout": (1, 4), "expected_size": (1, 4)}, + {"layout": (3, 3), "expected_size": (3, 3)}, + {"layout": (-1, 4), "expected_size": (1, 4)}, + {"layout": (4, -1), "expected_size": (4, 1)}, + {"layout": (-1, 2), "expected_size": (2, 2)}, + {"layout": (2, -1), "expected_size": (2, 2)}, + ), + ) + def test_hist_layout(self, layout_test): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + axes = df.hist(layout=layout_test["layout"]) + expected = layout_test["expected_size"] + _check_axes_shape(axes, axes_num=3, layout=expected) + + def test_hist_layout_error(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + # layout too small for all 4 plots + msg = "Layout of 1x1 must be larger than required size 3" + with pytest.raises(ValueError, match=msg): + df.hist(layout=(1, 1)) + + # invalid format for layout + msg = re.escape("Layout must be a tuple of (rows, columns)") + with pytest.raises(ValueError, match=msg): + df.hist(layout=(1,)) + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + df.hist(layout=(-1, -1)) + + # GH 9351 + def test_tight_layout(self): + df = DataFrame(np.random.default_rng(2).standard_normal((100, 2))) + df[2] = to_datetime( + np.random.default_rng(2).integers( + 812419200000000000, + 819331200000000000, + size=100, + dtype=np.int64, + ) + ) + # Use default_axes=True when plotting method generate subplots itself + _check_plot_works(df.hist, default_axes=True) + mpl.pyplot.tight_layout() + + def test_hist_subplot_xrot(self): + # GH 30288 + df = DataFrame( + { + "length": [1.5, 0.5, 1.2, 0.9, 3], + "animal": ["pig", "rabbit", "pig", "pig", "rabbit"], + } + ) + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + column="length", + by="animal", + bins=5, + xrot=0, + ) + _check_ticks_props(axes, xrot=0) + + @pytest.mark.parametrize( + "column, expected", + [ + (None, ["width", "length", "height"]), + (["length", "width", "height"], ["length", "width", "height"]), + ], + ) + def test_hist_column_order_unchanged(self, column, expected): + # GH29235 + + df = DataFrame( + { + "width": [0.7, 0.2, 0.15, 0.2, 1.1], + "length": [1.5, 0.5, 1.2, 0.9, 3], + "height": [3, 0.5, 3.4, 2, 1], + }, + index=["pig", "rabbit", "duck", "chicken", "horse"], + ) + + # Use default_axes=True when plotting method generate 
subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + column=column, + layout=(1, 3), + ) + result = [axes[0, i].get_title() for i in range(3)] + assert result == expected + + @pytest.mark.parametrize( + "histtype, expected", + [ + ("bar", True), + ("barstacked", True), + ("step", False), + ("stepfilled", True), + ], + ) + def test_histtype_argument(self, histtype, expected): + # GH23992 Verify functioning of histtype argument + df = DataFrame( + np.random.default_rng(2).integers(1, 10, size=(100, 2)), columns=["a", "b"] + ) + ax = df.hist(histtype=histtype) + _check_patches_all_filled(ax, filled=expected) + + @pytest.mark.parametrize("by", [None, "c"]) + @pytest.mark.parametrize("column", [None, "b"]) + def test_hist_with_legend(self, by, column): + # GH 6279 - DataFrame histogram can have a legend + expected_axes_num = 1 if by is None and column is not None else 2 + expected_layout = (1, expected_axes_num) + expected_labels = column or ["a", "b"] + if by is not None: + expected_labels = [expected_labels] * 2 + + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + + # Use default_axes=True when plotting method generate subplots itself + axes = _check_plot_works( + df.hist, + default_axes=True, + legend=True, + by=by, + column=column, + ) + + _check_axes_shape(axes, axes_num=expected_axes_num, layout=expected_layout) + if by is None and column is None: + axes = axes[0] + for expected_label, ax in zip(expected_labels, axes): + _check_legend_labels(ax, expected_label) + + @pytest.mark.parametrize("by", [None, "c"]) + @pytest.mark.parametrize("column", [None, "b"]) + def test_hist_with_legend_raises(self, by, column): + # GH 6279 - DataFrame histogram with legend and label raises + index = Index(15 * ["1"] + 15 * ["2"], name="c") + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), + index=index, + columns=["a", "b"], + ) + + with pytest.raises(ValueError, match="Cannot use both legend and label"): + df.hist(legend=True, by=by, column=column, label="d") + + def test_hist_df_kwargs(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + _, ax = mpl.pyplot.subplots() + ax = df.plot.hist(bins=5, ax=ax) + assert len(ax.patches) == 10 + + def test_hist_df_with_nonnumerics(self): + # GH 9853 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "B", "C", "D"], + ) + df["E"] = ["x", "y"] * 5 + _, ax = mpl.pyplot.subplots() + ax = df.plot.hist(bins=5, ax=ax) + assert len(ax.patches) == 20 + + def test_hist_df_with_nonnumerics_no_bins(self): + # GH 9853 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=["A", "B", "C", "D"], + ) + df["E"] = ["x", "y"] * 5 + _, ax = mpl.pyplot.subplots() + ax = df.plot.hist(ax=ax) # bins=10 + assert len(ax.patches) == 40 + + def test_hist_secondary_legend(self): + # GH 9610 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), columns=list("abcd") + ) + + # primary -> secondary + _, ax = mpl.pyplot.subplots() + ax = df["a"].plot.hist(legend=True, ax=ax) + df["b"].plot.hist(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax, labels=["a", "b (right)"]) + assert ax.get_yaxis().get_visible() + assert ax.right_ax.get_yaxis().get_visible() + + def test_hist_secondary_secondary(self): + # GH 9610 + df = DataFrame( + 
np.random.default_rng(2).standard_normal((30, 4)), columns=list("abcd") + ) + # secondary -> secondary + _, ax = mpl.pyplot.subplots() + ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax) + df["b"].plot.hist(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left axis must be invisible, right axis must be visible + _check_legend_labels(ax.left_ax, labels=["a (right)", "b (right)"]) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_hist_secondary_primary(self): + # GH 9610 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 4)), columns=list("abcd") + ) + # secondary -> primary + _, ax = mpl.pyplot.subplots() + ax = df["a"].plot.hist(legend=True, secondary_y=True, ax=ax) + # right axes is returned + df["b"].plot.hist(ax=ax, legend=True) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax.left_ax, labels=["a (right)", "b"]) + assert ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_hist_with_nans_and_weights(self): + # GH 48884 + mpl_patches = pytest.importorskip("matplotlib.patches") + df = DataFrame( + [[np.nan, 0.2, 0.3], [0.4, np.nan, np.nan], [0.7, 0.8, 0.9]], + columns=list("abc"), + ) + weights = np.array([0.25, 0.3, 0.45]) + no_nan_df = DataFrame([[0.4, 0.2, 0.3], [0.7, 0.8, 0.9]], columns=list("abc")) + no_nan_weights = np.array([[0.3, 0.25, 0.25], [0.45, 0.45, 0.45]]) + + _, ax0 = mpl.pyplot.subplots() + df.plot.hist(ax=ax0, weights=weights) + rects = [x for x in ax0.get_children() if isinstance(x, mpl_patches.Rectangle)] + heights = [rect.get_height() for rect in rects] + _, ax1 = mpl.pyplot.subplots() + no_nan_df.plot.hist(ax=ax1, weights=no_nan_weights) + no_nan_rects = [ + x for x in ax1.get_children() if isinstance(x, mpl_patches.Rectangle) + ] + no_nan_heights = [rect.get_height() for rect in no_nan_rects] + assert all(h0 == h1 for h0, h1 in zip(heights, no_nan_heights)) + + idxerror_weights = np.array([[0.3, 0.25], [0.45, 0.45]]) + + msg = "weights must have the same shape as data, or be a single column" + with pytest.raises(ValueError, match=msg): + _, ax2 = mpl.pyplot.subplots() + no_nan_df.plot.hist(ax=ax2, weights=idxerror_weights) + + +class TestDataFrameGroupByPlots: + def test_grouped_hist_legacy(self): + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(10) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + + axes = _grouped_hist(df.A, by=df.C) + _check_axes_shape(axes, axes_num=4, layout=(2, 2)) + + def test_grouped_hist_legacy_axes_shape_no_col(self): + rs = np.random.default_rng(10) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + axes = df.hist(by=df.C) + _check_axes_shape(axes, axes_num=4, layout=(2, 2)) + + def test_grouped_hist_legacy_single_key(self): + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + # group by a key with single value + 
axes = df.hist(by="D", rot=30) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + _check_ticks_props(axes, xrot=30) + + def test_grouped_hist_legacy_grouped_hist_kwargs(self): + from matplotlib.patches import Rectangle + + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + # make sure kwargs to hist are handled + xf, yf = 20, 18 + xrot, yrot = 30, 40 + + axes = _grouped_hist( + df.A, + by=df.C, + cumulative=True, + bins=4, + xlabelsize=xf, + xrot=xrot, + ylabelsize=yf, + yrot=yrot, + density=True, + ) + # height of last bin (index 5) must be 1.0 + for ax in axes.ravel(): + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + height = rects[-1].get_height() + tm.assert_almost_equal(height, 1.0) + _check_ticks_props(axes, xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot) + + def test_grouped_hist_legacy_grouped_hist(self): + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + axes = _grouped_hist(df.A, by=df.C, log=True) + # scale of y must be 'log' + _check_ax_scales(axes, yaxis="log") + + def test_grouped_hist_legacy_external_err(self): + from pandas.plotting._matplotlib.hist import _grouped_hist + + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + # propagate attr exception from matplotlib.Axes.hist + with tm.external_error_raised(AttributeError): + _grouped_hist(df.A, by=df.C, foo="bar") + + def test_grouped_hist_legacy_figsize_err(self): + rs = np.random.default_rng(2) + df = DataFrame(rs.standard_normal((10, 1)), columns=["A"]) + df["B"] = to_datetime( + rs.integers( + 812419200000000000, + 819331200000000000, + size=10, + dtype=np.int64, + ) + ) + df["C"] = rs.integers(0, 4, 10) + df["D"] = ["X"] * 10 + msg = "Specify figure size by tuple instead" + with pytest.raises(ValueError, match=msg): + df.hist(by="C", figsize="default") + + def test_grouped_hist_legacy2(self): + n = 10 + weight = Series(np.random.default_rng(2).normal(166, 20, size=n)) + height = Series(np.random.default_rng(2).normal(60, 10, size=n)) + gender_int = np.random.default_rng(2).choice([0, 1], size=n) + df_int = DataFrame({"height": height, "weight": weight, "gender": gender_int}) + gb = df_int.groupby("gender") + axes = gb.hist() + assert len(axes) == 2 + assert len(mpl.pyplot.get_fignums()) == 2 + + @pytest.mark.slow + @pytest.mark.parametrize( + "msg, plot_col, by_col, layout", + [ + [ + "Layout of 1x1 must be larger than required size 2", + "weight", + "gender", + (1, 1), + ], + [ + "Layout of 1x3 must be larger than required size 4", + "height", + "category", + (1, 3), + ], + [ + "At least one dimension of layout must be positive", + "height", + "category", + (-1, -1), + ], + ], + ) + def test_grouped_hist_layout_error(self, hist_df, msg, plot_col, by_col, layout): + df = hist_df + with pytest.raises(ValueError, match=msg): + 
df.hist(column=plot_col, by=getattr(df, by_col), layout=layout) + + @pytest.mark.slow + def test_grouped_hist_layout_warning(self, hist_df): + df = hist_df + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + df.hist, column="height", by=df.gender, layout=(2, 1) + ) + _check_axes_shape(axes, axes_num=2, layout=(2, 1)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "layout, check_layout, figsize", + [[(4, 1), (4, 1), None], [(-1, 1), (4, 1), None], [(4, 2), (4, 2), (12, 8)]], + ) + def test_grouped_hist_layout_figsize(self, hist_df, layout, check_layout, figsize): + df = hist_df + axes = df.hist(column="height", by=df.category, layout=layout, figsize=figsize) + _check_axes_shape(axes, axes_num=4, layout=check_layout, figsize=figsize) + + @pytest.mark.slow + @pytest.mark.parametrize("kwargs", [{}, {"column": "height", "layout": (2, 2)}]) + def test_grouped_hist_layout_by_warning(self, hist_df, kwargs): + df = hist_df + # GH 6769 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works(df.hist, by="classroom", **kwargs) + _check_axes_shape(axes, axes_num=3, layout=(2, 2)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs, axes_num, layout", + [ + [{"by": "gender", "layout": (3, 5)}, 2, (3, 5)], + [{"column": ["height", "weight", "category"]}, 3, (2, 2)], + ], + ) + def test_grouped_hist_layout_axes(self, hist_df, kwargs, axes_num, layout): + df = hist_df + axes = df.hist(**kwargs) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + def test_grouped_hist_multiple_axes(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + + fig, axes = mpl.pyplot.subplots(2, 3) + returned = df.hist(column=["height", "weight", "category"], ax=axes[0]) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[0]) + assert returned[0].figure is fig + + def test_grouped_hist_multiple_axes_no_cols(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + + fig, axes = mpl.pyplot.subplots(2, 3) + returned = df.hist(by="classroom", ax=axes[1]) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + tm.assert_numpy_array_equal(returned, axes[1]) + assert returned[0].figure is fig + + def test_grouped_hist_multiple_axes_error(self, hist_df): + # GH 6970, GH 7069 + df = hist_df + fig, axes = mpl.pyplot.subplots(2, 3) + # pass different number of axes from required + msg = "The number of passed axes must be 1, the same as the output plot" + with pytest.raises(ValueError, match=msg): + axes = df.hist(column="height", ax=axes) + + def test_axis_share_x(self, hist_df): + df = hist_df + # GH4089 + ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True) + + # share x + assert get_x_axis(ax1).joined(ax1, ax2) + assert get_x_axis(ax2).joined(ax1, ax2) + + # don't share y + assert not get_y_axis(ax1).joined(ax1, ax2) + assert not get_y_axis(ax2).joined(ax1, ax2) + + def test_axis_share_y(self, hist_df): + df = hist_df + ax1, ax2 = df.hist(column="height", by=df.gender, sharey=True) + + # share y + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + + # don't share x + assert not get_x_axis(ax1).joined(ax1, ax2) + assert not get_x_axis(ax2).joined(ax1, ax2) + + def test_axis_share_xy(self, hist_df): + df = hist_df + ax1, ax2 = df.hist(column="height", by=df.gender, sharex=True, sharey=True) + + # share both x and y + assert get_x_axis(ax1).joined(ax1, ax2) + assert get_x_axis(ax2).joined(ax1, ax2) + + assert 
get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + + @pytest.mark.parametrize( + "histtype, expected", + [ + ("bar", True), + ("barstacked", True), + ("step", False), + ("stepfilled", True), + ], + ) + def test_histtype_argument(self, histtype, expected): + # GH23992 Verify functioning of histtype argument + df = DataFrame( + np.random.default_rng(2).integers(1, 10, size=(10, 2)), columns=["a", "b"] + ) + ax = df.hist(by="a", histtype=histtype) + _check_patches_all_filled(ax, filled=expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_misc.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..cfb657c2a800fefe2d509ddfb398399af4ce8649 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_misc.py @@ -0,0 +1,720 @@ +""" Test cases for misc plot functions """ +import os + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + Index, + Series, + Timestamp, + date_range, + interval_range, + period_range, + plotting, + read_csv, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_colors, + _check_legend_labels, + _check_plot_works, + _check_text_labels, + _check_ticks_props, +) + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") +cm = pytest.importorskip("matplotlib.cm") + + +@pytest.fixture +def iris(datapath) -> DataFrame: + """ + The iris dataset as a DataFrame. + """ + return read_csv(datapath("io", "data", "csv", "iris.csv")) + + +@td.skip_if_installed("matplotlib") +def test_import_error_message(): + # GH-19810 + df = DataFrame({"A": [1, 2]}) + + with pytest.raises(ImportError, match="matplotlib is required for plotting"): + df.plot() + + +def test_get_accessor_args(): + func = plotting._core.PlotAccessor._get_call_args + + msg = "Called plot accessor for type list, expected Series or DataFrame" + with pytest.raises(TypeError, match=msg): + func(backend_name="", data=[], args=[], kwargs={}) + + msg = "should not be called with positional arguments" + with pytest.raises(TypeError, match=msg): + func(backend_name="", data=Series(dtype=object), args=["line", None], kwargs={}) + + x, y, kind, kwargs = func( + backend_name="", + data=DataFrame(), + args=["x"], + kwargs={"y": "y", "kind": "bar", "grid": False}, + ) + assert x == "x" + assert y == "y" + assert kind == "bar" + assert kwargs == {"grid": False} + + x, y, kind, kwargs = func( + backend_name="pandas.plotting._matplotlib", + data=Series(dtype=object), + args=[], + kwargs={}, + ) + assert x is None + assert y is None + assert kind == "line" + assert len(kwargs) == 24 + + +@pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds) +@pytest.mark.parametrize( + "data", [DataFrame(np.arange(15).reshape(5, 3)), Series(range(5))] +) +@pytest.mark.parametrize( + "index", + [ + Index(range(5)), + date_range("2020-01-01", periods=5), + period_range("2020-01-01", periods=5), + ], +) +def test_savefig(kind, data, index): + fig, ax = plt.subplots() + data.index = index + kwargs = {} + if kind in ["hexbin", "scatter", "pie"]: + if isinstance(data, Series): + pytest.skip(f"{kind} not supported with Series") + kwargs = {"x": 0, "y": 1} + data.plot(kind=kind, ax=ax, **kwargs) + fig.savefig(os.devnull) + + +class TestSeriesPlots: + def test_autocorrelation_plot(self): + from pandas.plotting import autocorrelation_plot + + 
ser = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + # Ensure no UserWarning when making plot + with tm.assert_produces_warning(None): + _check_plot_works(autocorrelation_plot, series=ser) + _check_plot_works(autocorrelation_plot, series=ser.values) + + ax = autocorrelation_plot(ser, label="Test") + _check_legend_labels(ax, labels=["Test"]) + + @pytest.mark.parametrize("kwargs", [{}, {"lag": 5}]) + def test_lag_plot(self, kwargs): + from pandas.plotting import lag_plot + + ser = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + _check_plot_works(lag_plot, series=ser, **kwargs) + + def test_bootstrap_plot(self): + from pandas.plotting import bootstrap_plot + + ser = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + _check_plot_works(bootstrap_plot, series=ser, size=10) + + +class TestDataFramePlots: + @pytest.mark.parametrize("pass_axis", [False, True]) + def test_scatter_matrix_axis(self, pass_axis): + pytest.importorskip("scipy") + scatter_matrix = plotting.scatter_matrix + + ax = None + if pass_axis: + _, ax = mpl.pyplot.subplots(3, 3) + + df = DataFrame(np.random.default_rng(2).standard_normal((100, 3))) + + # we are plotting multiples on a sub-plot + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + scatter_matrix, + frame=df, + range_padding=0.1, + ax=ax, + ) + axes0_labels = axes[0][0].yaxis.get_majorticklabels() + # GH 5662 + expected = ["-2", "0", "2"] + _check_text_labels(axes0_labels, expected) + _check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + @pytest.mark.parametrize("pass_axis", [False, True]) + def test_scatter_matrix_axis_smaller(self, pass_axis): + pytest.importorskip("scipy") + scatter_matrix = plotting.scatter_matrix + + ax = None + if pass_axis: + _, ax = mpl.pyplot.subplots(3, 3) + + df = DataFrame(np.random.default_rng(11).standard_normal((100, 3))) + df[0] = (df[0] - 2) / 3 + + # we are plotting multiples on a sub-plot + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + scatter_matrix, + frame=df, + range_padding=0.1, + ax=ax, + ) + axes0_labels = axes[0][0].yaxis.get_majorticklabels() + expected = ["-1.0", "-0.5", "0.0"] + _check_text_labels(axes0_labels, expected) + _check_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + @pytest.mark.slow + def test_andrews_curves_no_warning(self, iris): + from pandas.plotting import andrews_curves + + df = iris + # Ensure no UserWarning when making plot + with tm.assert_produces_warning(None): + _check_plot_works(andrews_curves, frame=df, class_column="Name") + + @pytest.mark.slow + @pytest.mark.parametrize( + "linecolors", + [ + ("#556270", "#4ECDC4", "#C7F464"), + ["dodgerblue", "aquamarine", "seagreen"], + ], + ) + @pytest.mark.parametrize( + "df", + [ + "iris", + DataFrame( + { + "A": np.random.default_rng(2).standard_normal(10), + "B": np.random.default_rng(2).standard_normal(10), + "C": np.random.default_rng(2).standard_normal(10), + "Name": ["A"] * 10, + } + ), + ], + ) + def test_andrews_curves_linecolors(self, request, df, linecolors): + from pandas.plotting import andrews_curves + + if isinstance(df, str): + df = request.getfixturevalue(df) + ax = _check_plot_works( + andrews_curves, frame=df, class_column="Name", color=linecolors + ) + _check_colors( + ax.get_lines()[:10], linecolors=linecolors, 
mapping=df["Name"][:10] + ) + + @pytest.mark.slow + @pytest.mark.parametrize( + "df", + [ + "iris", + DataFrame( + { + "A": np.random.default_rng(2).standard_normal(10), + "B": np.random.default_rng(2).standard_normal(10), + "C": np.random.default_rng(2).standard_normal(10), + "Name": ["A"] * 10, + } + ), + ], + ) + def test_andrews_curves_cmap(self, request, df): + from pandas.plotting import andrews_curves + + if isinstance(df, str): + df = request.getfixturevalue(df) + cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())] + ax = _check_plot_works( + andrews_curves, frame=df, class_column="Name", color=cmaps + ) + _check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]) + + @pytest.mark.slow + def test_andrews_curves_handle(self): + from pandas.plotting import andrews_curves + + colors = ["b", "g", "r"] + df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors}) + ax = andrews_curves(df, "Name", color=colors) + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, linecolors=colors) + + @pytest.mark.slow + @pytest.mark.parametrize( + "color", + [("#556270", "#4ECDC4", "#C7F464"), ["dodgerblue", "aquamarine", "seagreen"]], + ) + def test_parallel_coordinates_colors(self, iris, color): + from pandas.plotting import parallel_coordinates + + df = iris + + ax = _check_plot_works( + parallel_coordinates, frame=df, class_column="Name", color=color + ) + _check_colors(ax.get_lines()[:10], linecolors=color, mapping=df["Name"][:10]) + + @pytest.mark.slow + def test_parallel_coordinates_cmap(self, iris): + from matplotlib import cm + + from pandas.plotting import parallel_coordinates + + df = iris + + ax = _check_plot_works( + parallel_coordinates, frame=df, class_column="Name", colormap=cm.jet + ) + cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())] + _check_colors(ax.get_lines()[:10], linecolors=cmaps, mapping=df["Name"][:10]) + + @pytest.mark.slow + def test_parallel_coordinates_line_diff(self, iris): + from pandas.plotting import parallel_coordinates + + df = iris + + ax = _check_plot_works(parallel_coordinates, frame=df, class_column="Name") + nlines = len(ax.get_lines()) + nxticks = len(ax.xaxis.get_ticklabels()) + + ax = _check_plot_works( + parallel_coordinates, frame=df, class_column="Name", axvlines=False + ) + assert len(ax.get_lines()) == (nlines - nxticks) + + @pytest.mark.slow + def test_parallel_coordinates_handles(self, iris): + from pandas.plotting import parallel_coordinates + + df = iris + colors = ["b", "g", "r"] + df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3], "C": [1, 2, 3], "Name": colors}) + ax = parallel_coordinates(df, "Name", color=colors) + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, linecolors=colors) + + # not sure if this is indicative of a problem + @pytest.mark.filterwarnings("ignore:Attempting to set:UserWarning") + def test_parallel_coordinates_with_sorted_labels(self): + """For #15908""" + from pandas.plotting import parallel_coordinates + + df = DataFrame( + { + "feat": list(range(30)), + "class": [2 for _ in range(10)] + + [3 for _ in range(10)] + + [1 for _ in range(10)], + } + ) + ax = parallel_coordinates(df, "class", sort_labels=True) + polylines, labels = ax.get_legend_handles_labels() + color_label_tuples = zip( + [polyline.get_color() for polyline in polylines], labels + ) + ordered_color_label_tuples = sorted(color_label_tuples, key=lambda x: x[1]) + prev_next_tupels = zip( + list(ordered_color_label_tuples[0:-1]), 
list(ordered_color_label_tuples[1:]) + ) + for prev, nxt in prev_next_tupels: + # labels and colors are ordered strictly increasing + assert prev[1] < nxt[1] and prev[0] < nxt[0] + + def test_radviz_no_warning(self, iris): + from pandas.plotting import radviz + + df = iris + # Ensure no UserWarning when making plot + with tm.assert_produces_warning(None): + _check_plot_works(radviz, frame=df, class_column="Name") + + @pytest.mark.parametrize( + "color", + [("#556270", "#4ECDC4", "#C7F464"), ["dodgerblue", "aquamarine", "seagreen"]], + ) + def test_radviz_color(self, iris, color): + from pandas.plotting import radviz + + df = iris + ax = _check_plot_works(radviz, frame=df, class_column="Name", color=color) + # skip Circle drawn as ticks + patches = [p for p in ax.patches[:20] if p.get_label() != ""] + _check_colors(patches[:10], facecolors=color, mapping=df["Name"][:10]) + + def test_radviz_color_cmap(self, iris): + from matplotlib import cm + + from pandas.plotting import radviz + + df = iris + ax = _check_plot_works(radviz, frame=df, class_column="Name", colormap=cm.jet) + cmaps = [cm.jet(n) for n in np.linspace(0, 1, df["Name"].nunique())] + patches = [p for p in ax.patches[:20] if p.get_label() != ""] + _check_colors(patches, facecolors=cmaps, mapping=df["Name"][:10]) + + def test_radviz_colors_handles(self): + from pandas.plotting import radviz + + colors = [[0.0, 0.0, 1.0, 1.0], [0.0, 0.5, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0]] + df = DataFrame( + {"A": [1, 2, 3], "B": [2, 1, 3], "C": [3, 2, 1], "Name": ["b", "g", "r"]} + ) + ax = radviz(df, "Name", color=colors) + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, facecolors=colors) + + def test_subplot_titles(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + + # Case len(title) == len(df) + plot = df.plot(subplots=True, title=title) + assert [p.get_title() for p in plot] == title + + def test_subplot_titles_too_much(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + # Case len(title) > len(df) + msg = ( + "The length of `title` must equal the number of columns if " + "using `title` of type `list` and `subplots=True`" + ) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, title=title + ["kittens > puppies"]) + + def test_subplot_titles_too_little(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + msg = ( + "The length of `title` must equal the number of columns if " + "using `title` of type `list` and `subplots=True`" + ) + # Case len(title) < len(df) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, title=title[:2]) + + def test_subplot_titles_subplots_false(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + # Case subplots=False and title is of type list + msg = ( + "Using `title` of type `list` is not supported unless " + "`subplots=True` is passed" + ) + with pytest.raises(ValueError, match=msg): + df.plot(subplots=False, title=title) + + def test_subplot_titles_numeric_square_layout(self, iris): + df = iris.drop("Name", axis=1).head() + # Use the column names as the subplot titles + title = list(df.columns) + # Case df with 3 numeric columns but layout of (2,2) + plot = df.drop("SepalWidth", axis=1).plot( + subplots=True, layout=(2, 2), title=title[:-1] + ) + title_list = 
[ax.get_title() for sublist in plot for ax in sublist] + assert title_list == title[:3] + [""] + + def test_get_standard_colors_random_seed(self): + # GH17525 + df = DataFrame(np.zeros((10, 10))) + + # Make sure that the random seed isn't reset by get_standard_colors + plotting.parallel_coordinates(df, 0) + rand1 = np.random.default_rng(None).random() + plotting.parallel_coordinates(df, 0) + rand2 = np.random.default_rng(None).random() + assert rand1 != rand2 + + def test_get_standard_colors_consistency(self): + # GH17525 + # Make sure it produces the same colors every time it's called + from pandas.plotting._matplotlib.style import get_standard_colors + + color1 = get_standard_colors(1, color_type="random") + color2 = get_standard_colors(1, color_type="random") + assert color1 == color2 + + def test_get_standard_colors_default_num_colors(self): + from pandas.plotting._matplotlib.style import get_standard_colors + + # Make sure the default color_types returns the specified amount + color1 = get_standard_colors(1, color_type="default") + color2 = get_standard_colors(9, color_type="default") + color3 = get_standard_colors(20, color_type="default") + assert len(color1) == 1 + assert len(color2) == 9 + assert len(color3) == 20 + + def test_plot_single_color(self): + # Example from #20585. All 3 bars should have the same color + df = DataFrame( + { + "account-start": ["2017-02-03", "2017-03-03", "2017-01-01"], + "client": ["Alice Anders", "Bob Baker", "Charlie Chaplin"], + "balance": [-1432.32, 10.43, 30000.00], + "db-id": [1234, 2424, 251], + "proxy-id": [525, 1525, 2542], + "rank": [52, 525, 32], + } + ) + ax = df.client.value_counts().plot.bar() + colors = [rect.get_facecolor() for rect in ax.get_children()[0:3]] + assert all(color == colors[0] for color in colors) + + def test_get_standard_colors_no_appending(self): + # GH20726 + + # Make sure not to add more colors so that matplotlib can cycle + # correctly. 
+ from matplotlib import cm + + from pandas.plotting._matplotlib.style import get_standard_colors + + color_before = cm.gnuplot(range(5)) + color_after = get_standard_colors(1, color=color_before) + assert len(color_after) == len(color_before) + + df = DataFrame( + np.random.default_rng(2).standard_normal((48, 4)), columns=list("ABCD") + ) + + color_list = cm.gnuplot(np.linspace(0, 1, 16)) + p = df.A.plot.bar(figsize=(16, 7), color=color_list) + assert p.patches[1].get_facecolor() == p.patches[17].get_facecolor() + + @pytest.mark.parametrize("kind", ["bar", "line"]) + def test_dictionary_color(self, kind): + # issue-8193 + # Test plot color dictionary format + data_files = ["a", "b"] + + expected = [(0.5, 0.24, 0.6), (0.3, 0.7, 0.7)] + + df1 = DataFrame(np.random.default_rng(2).random((2, 2)), columns=data_files) + dic_color = {"b": (0.3, 0.7, 0.7), "a": (0.5, 0.24, 0.6)} + + ax = df1.plot(kind=kind, color=dic_color) + if kind == "bar": + colors = [rect.get_facecolor()[0:-1] for rect in ax.get_children()[0:3:2]] + else: + colors = [rect.get_color() for rect in ax.get_lines()[0:2]] + assert all(color == expected[index] for index, color in enumerate(colors)) + + def test_bar_plot(self): + # GH38947 + # Test bar plot with string and int index + from matplotlib.text import Text + + expected = [Text(0, 0, "0"), Text(1, 0, "Total")] + + df = DataFrame( + { + "a": [1, 2], + }, + index=Index([0, "Total"]), + ) + plot_bar = df.plot.bar() + assert all( + (a.get_text() == b.get_text()) + for a, b in zip(plot_bar.get_xticklabels(), expected) + ) + + def test_barh_plot_labels_mixed_integer_string(self): + # GH39126 + # Test barh plot with string and integer at the same column + from matplotlib.text import Text + + df = DataFrame([{"word": 1, "value": 0}, {"word": "knowledge", "value": 2}]) + plot_barh = df.plot.barh(x="word", legend=None) + expected_yticklabels = [Text(0, 0, "1"), Text(0, 1, "knowledge")] + assert all( + actual.get_text() == expected.get_text() + for actual, expected in zip( + plot_barh.get_yticklabels(), expected_yticklabels + ) + ) + + def test_has_externally_shared_axis_x_axis(self): + # GH33819 + # Test _has_externally_shared_axis() works for x-axis + func = plotting._matplotlib.tools._has_externally_shared_axis + + fig = mpl.pyplot.figure() + plots = fig.subplots(2, 4) + + # Create *externally* shared axes for first and third columns + plots[0][0] = fig.add_subplot(231, sharex=plots[1][0]) + plots[0][2] = fig.add_subplot(233, sharex=plots[1][2]) + + # Create *internally* shared axes for second and third columns + plots[0][1].twinx() + plots[0][2].twinx() + + # First column is only externally shared + # Second column is only internally shared + # Third column is both + # Fourth column is neither + assert func(plots[0][0], "x") + assert not func(plots[0][1], "x") + assert func(plots[0][2], "x") + assert not func(plots[0][3], "x") + + def test_has_externally_shared_axis_y_axis(self): + # GH33819 + # Test _has_externally_shared_axis() works for y-axis + func = plotting._matplotlib.tools._has_externally_shared_axis + + fig = mpl.pyplot.figure() + plots = fig.subplots(4, 2) + + # Create *externally* shared axes for first and third rows + plots[0][0] = fig.add_subplot(321, sharey=plots[0][1]) + plots[2][0] = fig.add_subplot(325, sharey=plots[2][1]) + + # Create *internally* shared axes for second and third rows + plots[1][0].twiny() + plots[2][0].twiny() + + # First row is only externally shared + # Second row is only internally shared + # Third row is both + # Fourth row is neither + 
assert func(plots[0][0], "y") + assert not func(plots[1][0], "y") + assert func(plots[2][0], "y") + assert not func(plots[3][0], "y") + + def test_has_externally_shared_axis_invalid_compare_axis(self): + # GH33819 + # Test _has_externally_shared_axis() raises an exception when + # passed an invalid value as compare_axis parameter + func = plotting._matplotlib.tools._has_externally_shared_axis + + fig = mpl.pyplot.figure() + plots = fig.subplots(4, 2) + + # Create arbitrary axes + plots[0][0] = fig.add_subplot(321, sharey=plots[0][1]) + + # Check that an invalid compare_axis value triggers the expected exception + msg = "needs 'x' or 'y' as a second parameter" + with pytest.raises(ValueError, match=msg): + func(plots[0][0], "z") + + def test_externally_shared_axes(self): + # Example from GH33819 + # Create data + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(1000), + "b": np.random.default_rng(2).standard_normal(1000), + } + ) + + # Create figure + fig = mpl.pyplot.figure() + plots = fig.subplots(2, 3) + + # Create *externally* shared axes + plots[0][0] = fig.add_subplot(231, sharex=plots[1][0]) + # note: no plots[0][1] that's the twin only case + plots[0][2] = fig.add_subplot(233, sharex=plots[1][2]) + + # Create *internally* shared axes + # note: no plots[0][0] that's the external only case + twin_ax1 = plots[0][1].twinx() + twin_ax2 = plots[0][2].twinx() + + # Plot data to primary axes + df["a"].plot(ax=plots[0][0], title="External share only").set_xlabel( + "this label should never be visible" + ) + df["a"].plot(ax=plots[1][0]) + + df["a"].plot(ax=plots[0][1], title="Internal share (twin) only").set_xlabel( + "this label should always be visible" + ) + df["a"].plot(ax=plots[1][1]) + + df["a"].plot(ax=plots[0][2], title="Both").set_xlabel( + "this label should never be visible" + ) + df["a"].plot(ax=plots[1][2]) + + # Plot data to twinned axes + df["b"].plot(ax=twin_ax1, color="green") + df["b"].plot(ax=twin_ax2, color="yellow") + + assert not plots[0][0].xaxis.get_label().get_visible() + assert plots[0][1].xaxis.get_label().get_visible() + assert not plots[0][2].xaxis.get_label().get_visible() + + def test_plot_bar_axis_units_timestamp_conversion(self): + # GH 38736 + # Ensure string x-axis from the second plot will not be converted to datetime + # due to axis data from first plot + df = DataFrame( + [1.0], + index=[Timestamp("2022-02-22 22:22:22")], + ) + _check_plot_works(df.plot) + s = Series({"A": 1.0}) + _check_plot_works(s.plot.bar) + + def test_bar_plt_xaxis_intervalrange(self): + # GH 38969 + # Ensure IntervalIndex x-axis produces a bar plot as expected + from matplotlib.text import Text + + expected = [Text(0, 0, "([0, 1],)"), Text(1, 0, "([1, 2],)")] + s = Series( + [1, 2], + index=[interval_range(0, 2, closed="both")], + ) + _check_plot_works(s.plot.bar) + assert all( + (a.get_text() == b.get_text()) + for a, b in zip(s.plot.bar().get_xticklabels(), expected) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_series.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_series.py new file mode 100644 index 0000000000000000000000000000000000000000..2b2f2f3b84307b9ed69e440d2ac0112abf153e67 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_series.py @@ -0,0 +1,985 @@ +""" Test cases for Series.plot """ +from datetime import datetime +from itertools import chain + +import numpy as np +import pytest + +from pandas.compat import is_platform_linux +from pandas.compat.numpy 
import np_version_gte1p24 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, + period_range, + plotting, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_ax_scales, + _check_axes_shape, + _check_colors, + _check_grid_settings, + _check_has_errorbars, + _check_legend_labels, + _check_plot_works, + _check_text_labels, + _check_ticks_props, + _unpack_cycler, + get_y_axis, +) + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +@pytest.fixture +def ts(): + return Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + + +@pytest.fixture +def series(): + return Series( + range(20), dtype=np.float64, name="series", index=[f"i_{i}" for i in range(20)] + ) + + +class TestSeriesPlots: + @pytest.mark.slow + @pytest.mark.parametrize("kwargs", [{"label": "foo"}, {"use_index": False}]) + def test_plot(self, ts, kwargs): + _check_plot_works(ts.plot, **kwargs) + + @pytest.mark.slow + def test_plot_tick_props(self, ts): + axes = _check_plot_works(ts.plot, rot=0) + _check_ticks_props(axes, xrot=0) + + @pytest.mark.slow + @pytest.mark.parametrize( + "scale, exp_scale", + [ + [{"logy": True}, {"yaxis": "log"}], + [{"logx": True}, {"xaxis": "log"}], + [{"loglog": True}, {"xaxis": "log", "yaxis": "log"}], + ], + ) + def test_plot_scales(self, ts, scale, exp_scale): + ax = _check_plot_works(ts.plot, style=".", **scale) + _check_ax_scales(ax, **exp_scale) + + @pytest.mark.slow + def test_plot_ts_bar(self, ts): + _check_plot_works(ts[:10].plot.bar) + + @pytest.mark.slow + def test_plot_ts_area_stacked(self, ts): + _check_plot_works(ts.plot.area, stacked=False) + + def test_plot_iseries(self): + ser = Series(range(5), period_range("2020-01-01", periods=5)) + _check_plot_works(ser.plot) + + @pytest.mark.parametrize( + "kind", + [ + "line", + "bar", + "barh", + pytest.param("kde", marks=td.skip_if_no("scipy")), + "hist", + "box", + ], + ) + def test_plot_series_kinds(self, series, kind): + _check_plot_works(series[:5].plot, kind=kind) + + def test_plot_series_barh(self, series): + _check_plot_works(series[:10].plot.barh) + + def test_plot_series_bar_ax(self): + ax = _check_plot_works( + Series(np.random.default_rng(2).standard_normal(10)).plot.bar, color="black" + ) + _check_colors([ax.patches[0]], facecolors=["black"]) + + @pytest.mark.parametrize("kwargs", [{}, {"layout": (-1, 1)}, {"layout": (1, -1)}]) + def test_plot_6951(self, ts, kwargs): + # GH 6951 + ax = _check_plot_works(ts.plot, subplots=True, **kwargs) + _check_axes_shape(ax, axes_num=1, layout=(1, 1)) + + def test_plot_figsize_and_title(self, series): + # figsize and title + _, ax = mpl.pyplot.subplots() + ax = series.plot(title="Test", figsize=(16, 8), ax=ax) + _check_text_labels(ax.title, "Test") + _check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8)) + + def test_dont_modify_rcParams(self): + # GH 8242 + key = "axes.prop_cycle" + colors = mpl.pyplot.rcParams[key] + _, ax = mpl.pyplot.subplots() + Series([1, 2, 3]).plot(ax=ax) + assert colors == mpl.pyplot.rcParams[key] + + @pytest.mark.parametrize("kwargs", [{}, {"secondary_y": True}]) + def test_ts_line_lim(self, ts, kwargs): + _, ax = mpl.pyplot.subplots() + ax = ts.plot(ax=ax, **kwargs) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data(orig=False)[0][0] + assert xmax >= lines[0].get_data(orig=False)[0][-1] + + def test_ts_area_lim(self, ts): + 
_, ax = mpl.pyplot.subplots() + ax = ts.plot.area(stacked=False, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=0) + + def test_ts_area_lim_xcompat(self, ts): + # GH 7471 + _, ax = mpl.pyplot.subplots() + ax = ts.plot.area(stacked=False, x_compat=True, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=30) + + def test_ts_tz_area_lim_xcompat(self, ts): + tz_ts = ts.copy() + tz_ts.index = tz_ts.tz_localize("GMT").tz_convert("CET") + _, ax = mpl.pyplot.subplots() + ax = tz_ts.plot.area(stacked=False, x_compat=True, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=0) + + def test_ts_tz_area_lim_xcompat_secondary_y(self, ts): + tz_ts = ts.copy() + tz_ts.index = tz_ts.tz_localize("GMT").tz_convert("CET") + _, ax = mpl.pyplot.subplots() + ax = tz_ts.plot.area(stacked=False, secondary_y=True, ax=ax) + xmin, xmax = ax.get_xlim() + line = ax.get_lines()[0].get_data(orig=False)[0] + assert xmin <= line[0] + assert xmax >= line[-1] + _check_ticks_props(ax, xrot=0) + + def test_area_sharey_dont_overwrite(self, ts): + # GH37942 + fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2, sharey=True) + + abs(ts).plot(ax=ax1, kind="area") + abs(ts).plot(ax=ax2, kind="area") + + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + plt.close(fig) + + def test_label(self): + s = Series([1, 2]) + _, ax = mpl.pyplot.subplots() + ax = s.plot(label="LABEL", legend=True, ax=ax) + _check_legend_labels(ax, labels=["LABEL"]) + mpl.pyplot.close("all") + + def test_label_none(self): + s = Series([1, 2]) + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=True, ax=ax) + _check_legend_labels(ax, labels=[""]) + mpl.pyplot.close("all") + + def test_label_ser_name(self): + s = Series([1, 2], name="NAME") + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=True, ax=ax) + _check_legend_labels(ax, labels=["NAME"]) + mpl.pyplot.close("all") + + def test_label_ser_name_override(self): + s = Series([1, 2], name="NAME") + # override the default + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=True, label="LABEL", ax=ax) + _check_legend_labels(ax, labels=["LABEL"]) + mpl.pyplot.close("all") + + def test_label_ser_name_override_dont_draw(self): + s = Series([1, 2], name="NAME") + # Add label info, but don't draw + _, ax = mpl.pyplot.subplots() + ax = s.plot(legend=False, label="LABEL", ax=ax) + assert ax.get_legend() is None # Hasn't been drawn + ax.legend() # draw it + _check_legend_labels(ax, labels=["LABEL"]) + mpl.pyplot.close("all") + + def test_boolean(self): + # GH 23719 + s = Series([False, False, True]) + _check_plot_works(s.plot, include_bool=True) + + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + _check_plot_works(s.plot) + + @pytest.mark.parametrize("index", [None, date_range("2020-01-01", periods=4)]) + def test_line_area_nan_series(self, index): + values = [1, 2, np.nan, 3] + d = Series(values, index=index) + ax = _check_plot_works(d.plot) + masked = ax.lines[0].get_ydata() + # remove nan for comparison purpose + exp = np.array([1, 2, 3], dtype=np.float64) + tm.assert_numpy_array_equal(np.delete(masked.data, 2), exp) + tm.assert_numpy_array_equal(masked.mask, np.array([False, False, True, False])) + + expected = 
np.array([1, 2, 0, 3], dtype=np.float64) + ax = _check_plot_works(d.plot, stacked=True) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + ax = _check_plot_works(d.plot.area) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + ax = _check_plot_works(d.plot.area, stacked=False) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected) + + def test_line_use_index_false(self): + s = Series([1, 2, 3], index=["a", "b", "c"]) + s.index.name = "The Index" + _, ax = mpl.pyplot.subplots() + ax = s.plot(use_index=False, ax=ax) + label = ax.get_xlabel() + assert label == "" + + def test_line_use_index_false_diff_var(self): + s = Series([1, 2, 3], index=["a", "b", "c"]) + s.index.name = "The Index" + _, ax = mpl.pyplot.subplots() + ax2 = s.plot.bar(use_index=False, ax=ax) + label2 = ax2.get_xlabel() + assert label2 == "" + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + @pytest.mark.parametrize("axis, meth", [("yaxis", "bar"), ("xaxis", "barh")]) + def test_bar_log(self, axis, meth): + expected = np.array([1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]) + + _, ax = mpl.pyplot.subplots() + ax = getattr(Series([200, 500]).plot, meth)(log=True, ax=ax) + tm.assert_numpy_array_equal(getattr(ax, axis).get_ticklocs(), expected) + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + @pytest.mark.parametrize( + "axis, kind, res_meth", + [["yaxis", "bar", "get_ylim"], ["xaxis", "barh", "get_xlim"]], + ) + def test_bar_log_kind_bar(self, axis, kind, res_meth): + # GH 9905 + expected = np.array([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1]) + + _, ax = mpl.pyplot.subplots() + ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind=kind, ax=ax) + ymin = 0.0007943282347242822 + ymax = 0.12589254117941673 + res = getattr(ax, res_meth)() + tm.assert_almost_equal(res[0], ymin) + tm.assert_almost_equal(res[1], ymax) + tm.assert_numpy_array_equal(getattr(ax, axis).get_ticklocs(), expected) + + def test_bar_ignore_index(self): + df = Series([1, 2, 3, 4], index=["a", "b", "c", "d"]) + _, ax = mpl.pyplot.subplots() + ax = df.plot.bar(use_index=False, ax=ax) + _check_text_labels(ax.get_xticklabels(), ["0", "1", "2", "3"]) + + def test_bar_user_colors(self): + s = Series([1, 2, 3, 4]) + ax = s.plot.bar(color=["red", "blue", "blue", "red"]) + result = [p.get_facecolor() for p in ax.patches] + expected = [ + (1.0, 0.0, 0.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (1.0, 0.0, 0.0, 1.0), + ] + assert result == expected + + def test_rotation_default(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # Default rot 0 + _, ax = mpl.pyplot.subplots() + axes = df.plot(ax=ax) + _check_ticks_props(axes, xrot=0) + + def test_rotation_30(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + _, ax = mpl.pyplot.subplots() + axes = df.plot(rot=30, ax=ax) + _check_ticks_props(axes, xrot=30) + + def test_irregular_datetime(self): + from pandas.plotting._matplotlib.converter import DatetimeConverter + + rng = date_range("1/1/2000", "3/1/2000") + rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]] + ser = Series(np.random.default_rng(2).standard_normal(len(rng)), rng) + _, ax = mpl.pyplot.subplots() + ax = ser.plot(ax=ax) + xp = DatetimeConverter.convert(datetime(1999, 1, 1), "", ax) + ax.set_xlim("1/1/1999", "1/1/2001") + assert xp == ax.get_xlim()[0] + _check_ticks_props(ax, xrot=30) + + def test_unsorted_index_xlim(self): + ser = 
Series( + [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0], + index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0], + ) + _, ax = mpl.pyplot.subplots() + ax = ser.plot(ax=ax) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= np.nanmin(lines[0].get_data(orig=False)[0]) + assert xmax >= np.nanmax(lines[0].get_data(orig=False)[0]) + + def test_pie_series(self): + # if sum of values is less than 1.0, pie handle them as rate and draw + # semicircle. + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + ax = _check_plot_works(series.plot.pie) + _check_text_labels(ax.texts, series.index) + assert ax.get_ylabel() == "YLABEL" + + def test_pie_series_no_label(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + ax = _check_plot_works(series.plot.pie, labels=None) + _check_text_labels(ax.texts, [""] * 5) + + def test_pie_series_less_colors_than_elements(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + color_args = ["r", "g", "b"] + ax = _check_plot_works(series.plot.pie, colors=color_args) + + color_expected = ["r", "g", "b", "r", "g"] + _check_colors(ax.patches, facecolors=color_expected) + + def test_pie_series_labels_and_colors(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + # with labels and colors + labels = ["A", "B", "C", "D", "E"] + color_args = ["r", "g", "b", "c", "m"] + ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args) + _check_text_labels(ax.texts, labels) + _check_colors(ax.patches, facecolors=color_args) + + def test_pie_series_autopct_and_fontsize(self): + series = Series( + np.random.default_rng(2).integers(1, 5), + index=["a", "b", "c", "d", "e"], + name="YLABEL", + ) + color_args = ["r", "g", "b", "c", "m"] + ax = _check_plot_works( + series.plot.pie, colors=color_args, autopct="%.2f", fontsize=7 + ) + pcts = [f"{s*100:.2f}" for s in series.values / series.sum()] + expected_texts = list(chain.from_iterable(zip(series.index, pcts))) + _check_text_labels(ax.texts, expected_texts) + for t in ax.texts: + assert t.get_fontsize() == 7 + + def test_pie_series_negative_raises(self): + # includes negative value + series = Series([1, 2, 0, 4, -1], index=["a", "b", "c", "d", "e"]) + with pytest.raises(ValueError, match="pie plot doesn't allow negative values"): + series.plot.pie() + + def test_pie_series_nan(self): + # includes nan + series = Series([1, 2, np.nan, 4], index=["a", "b", "c", "d"], name="YLABEL") + ax = _check_plot_works(series.plot.pie) + _check_text_labels(ax.texts, ["a", "b", "", "d"]) + + def test_pie_nan(self): + s = Series([1, np.nan, 1, 1]) + _, ax = mpl.pyplot.subplots() + ax = s.plot.pie(legend=True, ax=ax) + expected = ["0", "", "2", "3"] + result = [x.get_text() for x in ax.texts] + assert result == expected + + def test_df_series_secondary_legend(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + + # primary -> secondary (without passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + s.plot(legend=True, secondary_y=True, ax=ax) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax, labels=["a", "b", "c", "x (right)"]) + assert ax.get_yaxis().get_visible() + 
assert ax.right_ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_with_axes(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # primary -> secondary (with passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(ax=ax) + s.plot(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left and right axis must be visible + _check_legend_labels(ax, labels=["a", "b", "c", "x (right)"]) + assert ax.get_yaxis().get_visible() + assert ax.right_ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_both(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # secondary -> secondary (without passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(secondary_y=True, ax=ax) + s.plot(legend=True, secondary_y=True, ax=ax) + # both legends are drawn on left ax + # left axis must be invisible and right axis must be visible + expected = ["a (right)", "b (right)", "c (right)", "x (right)"] + _check_legend_labels(ax.left_ax, labels=expected) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_both_with_axis(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # secondary -> secondary (with passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(secondary_y=True, ax=ax) + s.plot(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left axis must be invisible and right axis must be visible + expected = ["a (right)", "b (right)", "c (right)", "x (right)"] + _check_legend_labels(ax.left_ax, expected) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + def test_df_series_secondary_legend_both_with_axis_2(self): + # GH 9779 + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 3)), columns=list("abc") + ) + s = Series(np.random.default_rng(2).standard_normal(30), name="x") + # secondary -> secondary (with passing ax) + _, ax = mpl.pyplot.subplots() + ax = df.plot(secondary_y=True, mark_right=False, ax=ax) + s.plot(ax=ax, legend=True, secondary_y=True) + # both legends are drawn on left ax + # left axis must be invisible and right axis must be visible + expected = ["a", "b", "c", "x (right)"] + _check_legend_labels(ax.left_ax, expected) + assert not ax.left_ax.get_yaxis().get_visible() + assert ax.get_yaxis().get_visible() + + @pytest.mark.parametrize( + "input_logy, expected_scale", [(True, "log"), ("sym", "symlog")] + ) + def test_secondary_logy(self, input_logy, expected_scale): + # GH 25545 + s1 = Series(np.random.default_rng(2).standard_normal(100)) + s2 = Series(np.random.default_rng(2).standard_normal(100)) + + # GH 24980 + ax1 = s1.plot(logy=input_logy) + ax2 = s2.plot(secondary_y=True, logy=input_logy) + + assert ax1.get_yscale() == expected_scale + assert ax2.get_yscale() == expected_scale + + def test_plot_fails_with_dupe_color_and_style(self): + x = Series(np.random.default_rng(2).standard_normal(2)) + _, ax = mpl.pyplot.subplots() + msg = ( + "Cannot pass 'style' string with a color symbol and 'color' keyword " + "argument. 
Please use one or the other or pass 'style' without a color "
+            "symbol"
+        )
+        with pytest.raises(ValueError, match=msg):
+            x.plot(style="k--", color="k", ax=ax)
+
+    @pytest.mark.parametrize(
+        "bw_method, ind",
+        [
+            ["scott", 20],
+            [None, 20],
+            [None, np.int_(20)],
+            [0.5, np.linspace(-100, 100, 20)],
+        ],
+    )
+    def test_kde_kwargs(self, ts, bw_method, ind):
+        pytest.importorskip("scipy")
+        _check_plot_works(ts.plot.kde, bw_method=bw_method, ind=ind)
+
+    def test_density_kwargs(self, ts):
+        pytest.importorskip("scipy")
+        sample_points = np.linspace(-100, 100, 20)
+        _check_plot_works(ts.plot.density, bw_method=0.5, ind=sample_points)
+
+    def test_kde_kwargs_check_axes(self, ts):
+        pytest.importorskip("scipy")
+        _, ax = mpl.pyplot.subplots()
+        sample_points = np.linspace(-100, 100, 20)
+        ax = ts.plot.kde(logy=True, bw_method=0.5, ind=sample_points, ax=ax)
+        _check_ax_scales(ax, yaxis="log")
+        _check_text_labels(ax.yaxis.get_label(), "Density")
+
+    def test_kde_missing_vals(self):
+        pytest.importorskip("scipy")
+        s = Series(np.random.default_rng(2).uniform(size=50))
+        s[0] = np.nan
+        axes = _check_plot_works(s.plot.kde)
+
+        # gh-14821: check if the values have any missing values
+        assert any(~np.isnan(axes.lines[0].get_xdata()))
+
+    @pytest.mark.xfail(reason="Api changed in 3.6.0")
+    def test_boxplot_series(self, ts):
+        _, ax = mpl.pyplot.subplots()
+        ax = ts.plot.box(logy=True, ax=ax)
+        _check_ax_scales(ax, yaxis="log")
+        xlabels = ax.get_xticklabels()
+        _check_text_labels(xlabels, [ts.name])
+        ylabels = ax.get_yticklabels()
+        _check_text_labels(ylabels, [""] * len(ylabels))
+
+    @pytest.mark.parametrize(
+        "kind",
+        plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
+    )
+    def test_kind_kwarg(self, kind):
+        pytest.importorskip("scipy")
+        s = Series(range(3))
+        _, ax = mpl.pyplot.subplots()
+        s.plot(kind=kind, ax=ax)
+        mpl.pyplot.close()
+
+    @pytest.mark.parametrize(
+        "kind",
+        plotting.PlotAccessor._common_kinds + plotting.PlotAccessor._series_kinds,
+    )
+    def test_kind_attr(self, kind):
+        pytest.importorskip("scipy")
+        s = Series(range(3))
+        _, ax = mpl.pyplot.subplots()
+        getattr(s.plot, kind)()
+        mpl.pyplot.close()
+
+    @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
+    def test_invalid_plot_data(self, kind):
+        s = Series(list("abcd"))
+        _, ax = mpl.pyplot.subplots()
+        msg = "no numeric data to plot"
+        with pytest.raises(TypeError, match=msg):
+            s.plot(kind=kind, ax=ax)
+
+    @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
+    def test_valid_object_plot(self, kind):
+        pytest.importorskip("scipy")
+        s = Series(range(10), dtype=object)
+        _check_plot_works(s.plot, kind=kind)
+
+    @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds)
+    def test_partially_invalid_plot_data(self, kind):
+        s = Series(["a", "b", 1.0, 2])
+        _, ax = mpl.pyplot.subplots()
+        msg = "no numeric data to plot"
+        with pytest.raises(TypeError, match=msg):
+            s.plot(kind=kind, ax=ax)
+
+    def test_invalid_kind(self):
+        s = Series([1, 2])
+        with pytest.raises(ValueError, match="invalid_kind is not a valid plot kind"):
+            s.plot(kind="invalid_kind")
+
+    def test_dup_datetime_index_plot(self):
+        dr1 = date_range("1/1/2009", periods=4)
+        dr2 = date_range("1/2/2009", periods=4)
+        index = dr1.append(dr2)
+        values = np.random.default_rng(2).standard_normal(index.size)
+        s = Series(values, index=index)
+        _check_plot_works(s.plot)
+
+    def test_errorbar_asymmetrical(self):
+        # GH9536
+        s = Series(np.arange(10), name="x")
+        err = np.random.default_rng(2).random((2, 10))
+
+        ax = s.plot(yerr=err, xerr=err)
+
+        result = np.vstack([i.vertices[:, 1] for i in ax.collections[1].get_paths()])
+        expected = (err.T * np.array([-1, 1])) + s.to_numpy().reshape(-1, 1)
+        tm.assert_numpy_array_equal(result, expected)
+
+        msg = (
+            "Asymmetrical error bars should be provided "
+            f"with the shape \\(2, {len(s)}\\)"
+        )
+        with pytest.raises(ValueError, match=msg):
+            s.plot(yerr=np.random.default_rng(2).random((2, 11)))
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize("kind", ["line", "bar"])
+    @pytest.mark.parametrize(
+        "yerr",
+        [
+            Series(np.abs(np.random.default_rng(2).standard_normal(10))),
+            np.abs(np.random.default_rng(2).standard_normal(10)),
+            list(np.abs(np.random.default_rng(2).standard_normal(10))),
+            DataFrame(
+                np.abs(np.random.default_rng(2).standard_normal((10, 2))),
+                columns=["x", "y"],
+            ),
+        ],
+    )
+    def test_errorbar_plot(self, kind, yerr):
+        s = Series(np.arange(10), name="x")
+        ax = _check_plot_works(s.plot, yerr=yerr, kind=kind)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
+
+    @pytest.mark.slow
+    def test_errorbar_plot_yerr_0(self):
+        s = Series(np.arange(10), name="x")
+        s_err = np.abs(np.random.default_rng(2).standard_normal(10))
+        ax = _check_plot_works(s.plot, xerr=s_err)
+        _check_has_errorbars(ax, xerr=1, yerr=0)
+
+    @pytest.mark.slow
+    @pytest.mark.parametrize(
+        "yerr",
+        [
+            Series(np.abs(np.random.default_rng(2).standard_normal(12))),
+            DataFrame(
+                np.abs(np.random.default_rng(2).standard_normal((12, 2))),
+                columns=["x", "y"],
+            ),
+        ],
+    )
+    def test_errorbar_plot_ts(self, yerr):
+        # test time series plotting
+        ix = date_range("1/1/2000", "1/1/2001", freq="ME")
+        ts = Series(np.arange(12), index=ix, name="x")
+        yerr.index = ix
+
+        ax = _check_plot_works(ts.plot, yerr=yerr)
+        _check_has_errorbars(ax, xerr=0, yerr=1)
+
+    @pytest.mark.slow
+    def test_errorbar_plot_invalid_yerr_shape(self):
+        s = Series(np.arange(10), name="x")
+        # check incorrect lengths and types
+        with tm.external_error_raised(ValueError):
+            s.plot(yerr=np.arange(11))
+
+    @pytest.mark.slow
+    def test_errorbar_plot_invalid_yerr(self):
+        s = Series(np.arange(10), name="x")
+        s_err = ["zzz"] * 10
+        with tm.external_error_raised(TypeError):
+            s.plot(yerr=s_err)
+
+    @pytest.mark.slow
+    def test_table_true(self, series):
+        _check_plot_works(series.plot, table=True)
+
+    @pytest.mark.slow
+    def test_table_self(self, series):
+        _check_plot_works(series.plot, table=series)
+
+    @pytest.mark.slow
+    def test_series_grid_settings(self):
+        # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
+        pytest.importorskip("scipy")
+        _check_grid_settings(
+            Series([1, 2, 3]),
+            plotting.PlotAccessor._series_kinds + plotting.PlotAccessor._common_kinds,
+        )
+
+    @pytest.mark.parametrize("c", ["r", "red", "green", "#FF0000"])
+    def test_standard_colors(self, c):
+        from pandas.plotting._matplotlib.style import get_standard_colors
+
+        result = get_standard_colors(1, color=c)
+        assert result == [c]
+
+        result = get_standard_colors(1, color=[c])
+        assert result == [c]
+
+        result = get_standard_colors(3, color=c)
+        assert result == [c] * 3
+
+        result = get_standard_colors(3, color=[c])
+        assert result == [c] * 3
+
+    def test_standard_colors_all(self):
+        from matplotlib import colors
+
+        from pandas.plotting._matplotlib.style import get_standard_colors
+
+        # multiple colors like mediumaquamarine
+        for c in colors.cnames:
+            result = get_standard_colors(num_colors=1, color=c)
+            assert result == [c]
+
+            result = get_standard_colors(num_colors=1, color=[c])
+            assert result == [c]
+
+            result = get_standard_colors(num_colors=3, color=c)
+            assert result == [c] * 3
+
+            result = get_standard_colors(num_colors=3, color=[c])
+            assert result == [c] * 3
+
+        # single letter colors like k
+        for c in colors.ColorConverter.colors:
+            result = get_standard_colors(num_colors=1, color=c)
+            assert result == [c]
+
+            result = get_standard_colors(num_colors=1, color=[c])
+            assert result == [c]
+
+            result = get_standard_colors(num_colors=3, color=c)
+            assert result == [c] * 3
+
+            result = get_standard_colors(num_colors=3, color=[c])
+            assert result == [c] * 3
+
+    def test_series_plot_color_kwargs(self):
+        # GH1890
+        _, ax = mpl.pyplot.subplots()
+        ax = Series(np.arange(12) + 1).plot(color="green", ax=ax)
+        _check_colors(ax.get_lines(), linecolors=["green"])
+
+    def test_time_series_plot_color_kwargs(self):
+        # #1890
+        _, ax = mpl.pyplot.subplots()
+        ax = Series(np.arange(12) + 1, index=date_range("1/1/2000", periods=12)).plot(
+            color="green", ax=ax
+        )
+        _check_colors(ax.get_lines(), linecolors=["green"])
+
+    def test_time_series_plot_color_with_empty_kwargs(self):
+        import matplotlib as mpl
+
+        def_colors = _unpack_cycler(mpl.rcParams)
+        index = date_range("1/1/2000", periods=12)
+        s = Series(np.arange(1, 13), index=index)
+
+        ncolors = 3
+
+        _, ax = mpl.pyplot.subplots()
+        for i in range(ncolors):
+            ax = s.plot(ax=ax)
+        _check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
+
+    def test_xticklabels(self):
+        # GH11529
+        s = Series(np.arange(10), index=[f"P{i:02d}" for i in range(10)])
+        _, ax = mpl.pyplot.subplots()
+        ax = s.plot(xticks=[0, 3, 5, 9], ax=ax)
+        exp = [f"P{i:02d}" for i in [0, 3, 5, 9]]
+        _check_text_labels(ax.get_xticklabels(), exp)
+
+    def test_xtick_barPlot(self):
+        # GH28172
+        s = Series(range(10), index=[f"P{i:02d}" for i in range(10)])
+        ax = s.plot.bar(xticks=range(0, 11, 2))
+        exp = np.array(list(range(0, 11, 2)))
+        tm.assert_numpy_array_equal(exp, ax.get_xticks())
+
+    def test_custom_business_day_freq(self):
+        # GH7222
+        from pandas.tseries.offsets import CustomBusinessDay
+
+        s = Series(
+            range(100, 121),
+            index=pd.bdate_range(
+                start="2014-05-01",
+                end="2014-06-01",
+                freq=CustomBusinessDay(holidays=["2014-05-26"]),
+            ),
+        )
+
+        _check_plot_works(s.plot)
+
+    @pytest.mark.xfail(
+        reason="GH#24426, see also "
+        "github.com/pandas-dev/pandas/commit/"
+        "ef1bd69fa42bbed5d09dd17f08c44fc8bfc2b685#r61470674"
+    )
+    def test_plot_accessor_updates_on_inplace(self):
+        ser = Series([1, 2, 3, 4])
+        _, ax = mpl.pyplot.subplots()
+        ax = ser.plot(ax=ax)
+        before = ax.xaxis.get_ticklocs()
+
+        ser.drop([0, 1], inplace=True)
+        _, ax = mpl.pyplot.subplots()
+        after = ax.xaxis.get_ticklocs()
+        tm.assert_numpy_array_equal(before, after)
+
+    @pytest.mark.parametrize("kind", ["line", "area"])
+    def test_plot_xlim_for_series(self, kind):
+        # test if xlim is also correctly plotted in Series for line and area
+        # GH 27686
+        s = Series([2, 3])
+        _, ax = mpl.pyplot.subplots()
+        s.plot(kind=kind, ax=ax)
+        xlims = ax.get_xlim()
+
+        assert xlims[0] < 0
+        assert xlims[1] > 1
+
+    def test_plot_no_rows(self):
+        # GH 27758
+        df = Series(dtype=int)
+        assert df.empty
+        ax = df.plot()
+        assert len(ax.get_lines()) == 1
+        line = ax.get_lines()[0]
+        assert len(line.get_xdata()) == 0
+        assert len(line.get_ydata()) == 0
+
+    def test_plot_no_numeric_data(self):
+        df = Series(["a", "b", "c"])
+        with pytest.raises(TypeError, match="no numeric data to plot"):
+            df.plot()
+
+    @pytest.mark.parametrize(
+        "data, index",
+        [
+            ([1, 2, 3, 4], [3, 2, 1, 0]),
+            ([10, 50, 20, 30], [1910, 1920, 1980, 1950]),
+        ],
+    )
+    def test_plot_order(self, data, index):
+        # GH38865 Verify plot order of a Series
+        ser = Series(data=data, index=index)
+        ax = ser.plot(kind="bar")
+
+        expected = ser.tolist()
+        result = [
+            patch.get_bbox().ymax
+            for patch in sorted(ax.patches, key=lambda patch: patch.get_bbox().xmax)
+        ]
+        assert expected == result
+
+    def test_style_single_ok(self):
+        s = Series([1, 2])
+        ax = s.plot(style="s", color="C3")
+        assert ax.lines[0].get_color() == "C3"
+
+    @pytest.mark.parametrize(
+        "index_name, old_label, new_label",
+        [(None, "", "new"), ("old", "old", "new"), (None, "", "")],
+    )
+    @pytest.mark.parametrize("kind", ["line", "area", "bar", "barh", "hist"])
+    def test_xlabel_ylabel_series(self, kind, index_name, old_label, new_label):
+        # GH 9093
+        ser = Series([1, 2, 3, 4])
+        ser.index.name = index_name
+
+        # default is the ylabel is not shown and xlabel is index name (reverse for barh)
+        ax = ser.plot(kind=kind)
+        if kind == "barh":
+            assert ax.get_xlabel() == ""
+            assert ax.get_ylabel() == old_label
+        elif kind == "hist":
+            assert ax.get_xlabel() == ""
+            assert ax.get_ylabel() == "Frequency"
+        else:
+            assert ax.get_ylabel() == ""
+            assert ax.get_xlabel() == old_label
+
+        # old xlabel will be overridden and assigned ylabel will be used as ylabel
+        ax = ser.plot(kind=kind, ylabel=new_label, xlabel=new_label)
+        assert ax.get_ylabel() == new_label
+        assert ax.get_xlabel() == new_label
+
+    @pytest.mark.parametrize(
+        "index",
+        [
+            pd.timedelta_range(start=0, periods=2, freq="D"),
+            [pd.Timedelta(days=1), pd.Timedelta(days=2)],
+        ],
+    )
+    def test_timedelta_index(self, index):
+        # GH37454
+        xlims = (3, 1)
+        ax = Series([1, 2], index=index).plot(xlim=(xlims))
+        assert ax.get_xlim() == (3, 1)
+
+    def test_series_none_color(self):
+        # GH51953
+        series = Series([1, 2, 3])
+        ax = series.plot(color=None)
+        expected = _unpack_cycler(mpl.pyplot.rcParams)[:1]
+        _check_colors(ax.get_lines(), linecolors=expected)
+
+    @pytest.mark.slow
+    def test_plot_no_warning(self, ts):
+        # GH 55138
+        # TODO(3.0): this can be removed once Period[B] deprecation is enforced
+        with tm.assert_produces_warning(False):
+            _ = ts.plot()
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_style.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_style.py
new file mode 100644
index 0000000000000000000000000000000000000000..665bda15724fd67dc9917509d2b95957b03107e3
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_style.py
@@ -0,0 +1,157 @@
+import pytest
+
+from pandas import Series
+
+pytest.importorskip("matplotlib")
+from pandas.plotting._matplotlib.style import get_standard_colors
+
+
+class TestGetStandardColors:
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (3, ["red", "green", "blue"]),
+            (5, ["red", "green", "blue", "red", "green"]),
+            (7, ["red", "green", "blue", "red", "green", "blue", "red"]),
+            (2, ["red", "green"]),
+            (1, ["red"]),
+        ],
+    )
+    def test_default_colors_named_from_prop_cycle(self, num_colors, expected):
+        import matplotlib as mpl
+        from matplotlib.pyplot import cycler
+
+        mpl_params = {
+            "axes.prop_cycle": cycler(color=["red", "green", "blue"]),
+        }
+        with mpl.rc_context(rc=mpl_params):
+            result = get_standard_colors(num_colors=num_colors)
+            assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (1, ["b"]),
+            (3, ["b", "g", "r"]),
+            (4, ["b", "g", "r", "y"]),
+            (5, ["b", "g", "r", "y", "b"]),
+            (7, ["b", "g", "r", "y", "b", "g", "r"]),
+        ],
+    )
+    def test_default_colors_named_from_prop_cycle_string(self, num_colors, expected):
+        import matplotlib as mpl
+        from matplotlib.pyplot import cycler
+
+        mpl_params = {
+            "axes.prop_cycle": cycler(color="bgry"),
+        }
+        with mpl.rc_context(rc=mpl_params):
+            result = get_standard_colors(num_colors=num_colors)
+            assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected_name",
+        [
+            (1, ["C0"]),
+            (3, ["C0", "C1", "C2"]),
+            (
+                12,
+                [
+                    "C0",
+                    "C1",
+                    "C2",
+                    "C3",
+                    "C4",
+                    "C5",
+                    "C6",
+                    "C7",
+                    "C8",
+                    "C9",
+                    "C0",
+                    "C1",
+                ],
+            ),
+        ],
+    )
+    def test_default_colors_named_undefined_prop_cycle(self, num_colors, expected_name):
+        import matplotlib as mpl
+        import matplotlib.colors as mcolors
+
+        with mpl.rc_context(rc={}):
+            expected = [mcolors.to_hex(x) for x in expected_name]
+            result = get_standard_colors(num_colors=num_colors)
+            assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (1, ["red", "green", (0.1, 0.2, 0.3)]),
+            (2, ["red", "green", (0.1, 0.2, 0.3)]),
+            (3, ["red", "green", (0.1, 0.2, 0.3)]),
+            (4, ["red", "green", (0.1, 0.2, 0.3), "red"]),
+        ],
+    )
+    def test_user_input_color_sequence(self, num_colors, expected):
+        color = ["red", "green", (0.1, 0.2, 0.3)]
+        result = get_standard_colors(color=color, num_colors=num_colors)
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (1, ["r", "g", "b", "k"]),
+            (2, ["r", "g", "b", "k"]),
+            (3, ["r", "g", "b", "k"]),
+            (4, ["r", "g", "b", "k"]),
+            (5, ["r", "g", "b", "k", "r"]),
+            (6, ["r", "g", "b", "k", "r", "g"]),
+        ],
+    )
+    def test_user_input_color_string(self, num_colors, expected):
+        color = "rgbk"
+        result = get_standard_colors(color=color, num_colors=num_colors)
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "num_colors, expected",
+        [
+            (1, [(0.1, 0.2, 0.3)]),
+            (2, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]),
+            (3, [(0.1, 0.2, 0.3), (0.1, 0.2, 0.3), (0.1, 0.2, 0.3)]),
+        ],
+    )
+    def test_user_input_color_floats(self, num_colors, expected):
+        color = (0.1, 0.2, 0.3)
+        result = get_standard_colors(color=color, num_colors=num_colors)
+        assert result == expected
+
+    @pytest.mark.parametrize(
+        "color, num_colors, expected",
+        [
+            ("Crimson", 1, ["Crimson"]),
+            ("DodgerBlue", 2, ["DodgerBlue", "DodgerBlue"]),
+            ("firebrick", 3, ["firebrick", "firebrick", "firebrick"]),
+        ],
+    )
+    def test_user_input_named_color_string(self, color, num_colors, expected):
+        result = get_standard_colors(color=color, num_colors=num_colors)
+        assert result == expected
+
+    @pytest.mark.parametrize("color", ["", [], (), Series([], dtype="object")])
+    def test_empty_color_raises(self, color):
+        with pytest.raises(ValueError, match="Invalid color argument"):
+            get_standard_colors(color=color, num_colors=1)
+
+    @pytest.mark.parametrize(
+        "color",
+        [
+            "bad_color",
+            ("red", "green", "bad_color"),
+            (0.1,),
+            (0.1, 0.2),
+            (0.1, 0.2, 0.3, 0.4, 0.5),  # must be either 3 or 4 floats
+        ],
+    )
+    def test_bad_color_raises(self, color):
+        with pytest.raises(ValueError, match="Invalid color"):
+            get_standard_colors(color=color, num_colors=5)
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b070e6094b961a36ceee2f38bb00c690f802335
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/conftest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/conftest.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..941bdb83bb2372676042055220eb9a5b4634d50e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/conftest.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_attr_equal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_attr_equal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5aa4efa998b17a09f0fc7658f4ccab870d18997e
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_attr_equal.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_extension_array_equal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_extension_array_equal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..065f1b7511aadf84dee69f0eaa1db766ea23c566
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_extension_array_equal.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_frame_equal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_frame_equal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a903a52f180c24623f7e2f80ed642bfe90f9ae2
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_frame_equal.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_index_equal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_index_equal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e07cd5762d17e0dc4f5b08641ac8f5d410501d1
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_index_equal.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_interval_array_equal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_interval_array_equal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0abd9d3500f821320d827227436af753c976fc4f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_interval_array_equal.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_produces_warning.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_produces_warning.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..efb3a4122b59d05da7294f148aec47aa5ba79c68
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_produces_warning.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_series_equal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_series_equal.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc22513e763d44b7f36fe8ad7c895df205e554ed
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_series_equal.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_kwarg.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_kwarg.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d69b9c8740aa19fa3b0092230730c3c0d596fd13
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_kwarg.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_nonkeyword_arguments.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_nonkeyword_arguments.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a73d869aa61a33dd62c186c5b6f4104ece10c54
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_nonkeyword_arguments.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_hashing.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_hashing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..065ed35ee9b3aa451657dbc813df21bbee4d3208
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_hashing.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_numba.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_numba.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35851dec4e1bc665bcab0385e6e0cf2e95d783ef
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_numba.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_shares_memory.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_shares_memory.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f747a4f80549f6af9419d9cfce5090a31e74d56c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_shares_memory.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_util.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69aafc4b511166f0c959a42525c2dfb52d646b6f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_util.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_args.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_args.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..beeed53c127af49828720ff30a9cc0f5b83954ec
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_args.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_args_and_kwargs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_args_and_kwargs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48ae4ce9664f98a6d1f28731f00649b01263ad84
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_args_and_kwargs.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_inclusive.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_inclusive.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5022b8aa463eacdf7e71abd70dd14bd55c34b07
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_inclusive.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_kwargs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_kwargs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd665f6d602e80f3627c3dec37edc57e4459e79c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_validate_kwargs.cpython-310.pyc differ
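
As a quick, runnable illustration of the behaviour that test_style.py above pins down for the private pandas.plotting._matplotlib.style.get_standard_colors helper, here is a minimal usage sketch (not part of the patch; it assumes pandas with matplotlib installed, and the helper is private API whose import path may change):

# Illustrative sketch only; mirrors assertions made in the tests above.
from pandas.plotting._matplotlib.style import get_standard_colors

# A named color string is repeated num_colors times.
assert get_standard_colors(3, color="green") == ["green"] * 3

# A string of single-letter color codes is treated as a sequence and cycled.
assert get_standard_colors(num_colors=5, color="rgbk") == ["r", "g", "b", "k", "r"]

# With no color argument, colors come from matplotlib's active prop_cycle.
print(get_standard_colors(num_colors=2))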