diff --git a/ckpts/tensorboard/events.out.tfevents.1713955089.peacock-3.29161.0 b/ckpts/tensorboard/events.out.tfevents.1713955089.peacock-3.29161.0 new file mode 100644 index 0000000000000000000000000000000000000000..6152435a52d0e69051a4e73170fc251620b7cfb0 --- /dev/null +++ b/ckpts/tensorboard/events.out.tfevents.1713955089.peacock-3.29161.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab325104bd88dc3a9fe1b7c137dd6f0fbb2b3e02ce835ed0186cc75dd680052e +size 216569 diff --git a/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..6ae13103f5f8a61ec0cfd043417565f2c9ee85e4 --- /dev/null +++ b/ckpts/universal/global_step80/zero/14.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:822c165438324adc02ec40f3f376cc6330a501cb17f3b5aa9fc6d434730cb67b +size 33555627 diff --git a/ckpts/universal/global_step80/zero/18.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/18.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..1a8a20cba3813e379a7b4e962a11ee8d0d8c3326 --- /dev/null +++ b/ckpts/universal/global_step80/zero/18.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8f91eede38a4e639ccfe2330bec2e8478fe9a6f5da7a7d3792a2164596e67cb +size 16778396 diff --git a/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..46bb51da151dd3e100389127178015067c768195 --- /dev/null +++ b/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a226f98e93c6dc8d51dca1aefd53c9920fe55740e13d5a6652d34c3db83bca96 +size 33555627 diff --git a/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..28ef4362bdcb91d01fea8b87d97e722efc1f16b8 --- /dev/null +++ b/ckpts/universal/global_step80/zero/26.mlp.dense_h_to_4h_swiglu.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:492cc30afeb946f8219e4d5e0dbb2419daf574050c9bfa6b018e97d4a9499e76 +size 33555533 diff --git a/ckpts/universal/global_step80/zero/5.attention.dense.weight/exp_avg.pt b/ckpts/universal/global_step80/zero/5.attention.dense.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..b44beb39706c9f86520fb632eed5e9464ce2192d --- /dev/null +++ b/ckpts/universal/global_step80/zero/5.attention.dense.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c169d1bef4ba4303a79edffa5be71e6867f0a000a87ff40add4279b6d9a53d8 +size 16778396 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/dtypes/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/dtypes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03c843e27a7d5be6cb08b3200590c61a3b513aea Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fe59ab7f9000651a2156c13ed47fea1eb9af6ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eae18a31fd558bec8583a9a64826e089d1dee72b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_boxplot_method.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_boxplot_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c4edd84c3421521328d07dbe2b20a239249a0895 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_boxplot_method.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2513f4fe1c4ea2b04475c0a50abbab3f1cc9d85a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..355f85b2987c2ebd7ec1068161f304d4e6e356d1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88eb7ecdfb761eba2f7c02bbc40cf128a9ae7c57 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_hist_method.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_hist_method.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c49798025e76e3747284aa24b3038b6ebaffd4b6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_hist_method.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_misc.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25d6df93a0d0ae972f21a610c287cb0915f28137 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_misc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_series.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_series.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e544dd819484048825b92565a1fa5b628fb0f15 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_series.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5c490b85e806954d3ce2e7e46bd90b953637beb Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9047aa96d8bd33827c9d609528553c4cc3e66586 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..058604ecb08b29d5097df75b8b1beb34a3210f3a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aad22cc627ab4c03fa4f3d3011b01241c0d73883 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f14f742ebbf972ddc12fb59a73ff1984bea09ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bb7529ea09e5f1d0acae4718ccde2e6af36d20d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25502f323c53b34aabf9e442eeb278e55376096d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py new file 
mode 100644 index 0000000000000000000000000000000000000000..45dc612148f40ea29c7fac46b6b9d8edd29b17fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py @@ -0,0 +1,2592 @@ +""" Test cases for DataFrame.plot """ +from datetime import ( + date, + datetime, +) +import gc +import itertools +import re +import string +import weakref + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.api import is_list_like + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + PeriodIndex, + Series, + bdate_range, + date_range, + option_context, + plotting, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_ax_scales, + _check_axes_shape, + _check_box_return_type, + _check_colors, + _check_data, + _check_grid_settings, + _check_has_errorbars, + _check_legend_labels, + _check_plot_works, + _check_text_labels, + _check_ticks_props, + _check_visible, + get_y_axis, +) + +from pandas.io.formats.printing import pprint_thing + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestDataFramePlots: + @pytest.mark.slow + def test_plot(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + _check_plot_works(df.plot, grid=False) + + @pytest.mark.slow + def test_plot_subplots(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + # _check_plot_works adds an ax so use default_axes=True to avoid warning + axes = _check_plot_works(df.plot, default_axes=True, subplots=True) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + + @pytest.mark.slow + def test_plot_subplots_negative_layout(self): + df = DataFrame( + 
np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + axes = _check_plot_works( + df.plot, + default_axes=True, + subplots=True, + layout=(-1, 2), + ) + _check_axes_shape(axes, axes_num=4, layout=(2, 2)) + + @pytest.mark.slow + def test_plot_subplots_use_index(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + axes = _check_plot_works( + df.plot, + default_axes=True, + subplots=True, + use_index=False, + ) + _check_ticks_props(axes, xrot=0) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + + @pytest.mark.xfail(reason="Api changed in 3.6.0") + @pytest.mark.slow + def test_plot_invalid_arg(self): + df = DataFrame({"x": [1, 2], "y": [3, 4]}) + msg = "'Line2D' object has no property 'blarg'" + with pytest.raises(AttributeError, match=msg): + df.plot.line(blarg=True) + + @pytest.mark.slow + def test_plot_tick_props(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + ax = _check_plot_works(df.plot, use_index=True) + _check_ticks_props(ax, xrot=0) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", + [ + {"yticks": [1, 5, 10]}, + {"xticks": [1, 5, 10]}, + {"ylim": (-100, 100), "xlim": (-100, 100)}, + {"default_axes": True, "subplots": True, "title": "blah"}, + ], + ) + def test_plot_other_args(self, kwargs): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + _check_plot_works(df.plot, **kwargs) + + @pytest.mark.slow + def test_plot_visible_ax(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + # We have to redo it here because _check_plot_works does two plots, + # once without an ax kwarg and once with an ax kwarg and the new sharex + # 
behaviour does not remove the visibility of the latter axis (as ax is + # present). see: https://github.com/pandas-dev/pandas/issues/9737 + + axes = df.plot(subplots=True, title="blah") + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + for ax in axes[:2]: + _check_visible(ax.xaxis) # xaxis must be visible for grid + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + _check_visible([ax.xaxis.get_label()], visible=False) + for ax in [axes[2]]: + _check_visible(ax.xaxis) + _check_visible(ax.get_xticklabels()) + _check_visible([ax.xaxis.get_label()]) + _check_ticks_props(ax, xrot=0) + + @pytest.mark.slow + def test_plot_title(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + _check_plot_works(df.plot, title="blah") + + @pytest.mark.slow + def test_plot_multiindex(self): + tuples = zip(string.ascii_letters[:10], range(10)) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=MultiIndex.from_tuples(tuples), + ) + ax = _check_plot_works(df.plot, use_index=True) + _check_ticks_props(ax, xrot=0) + + @pytest.mark.slow + def test_plot_multiindex_unicode(self): + # unicode + index = MultiIndex.from_tuples( + [ + ("\u03b1", 0), + ("\u03b1", 1), + ("\u03b2", 2), + ("\u03b2", 3), + ("\u03b3", 4), + ("\u03b3", 5), + ("\u03b4", 6), + ("\u03b4", 7), + ], + names=["i0", "i1"], + ) + columns = MultiIndex.from_tuples( + [("bar", "\u0394"), ("bar", "\u0395")], names=["c0", "c1"] + ) + df = DataFrame( + np.random.default_rng(2).integers(0, 10, (8, 2)), + columns=columns, + index=index, + ) + _check_plot_works(df.plot, title="\u03A3") + + @pytest.mark.slow + @pytest.mark.parametrize("layout", [None, (-1, 1)]) + def test_plot_single_column_bar(self, layout): + # GH 6951 + # Test with single column + df = DataFrame({"x": np.random.default_rng(2).random(10)}) + axes = _check_plot_works(df.plot.bar, subplots=True, layout=layout) + 
_check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.slow + def test_plot_passed_ax(self): + # When ax is supplied and required number of axes is 1, + # passed ax should be used: + df = DataFrame({"x": np.random.default_rng(2).random(10)}) + _, ax = mpl.pyplot.subplots() + axes = df.plot.bar(subplots=True, ax=ax) + assert len(axes) == 1 + result = ax.axes + assert result is axes[0] + + @pytest.mark.parametrize( + "cols, x, y", + [ + [list("ABCDE"), "A", "B"], + [["A", "B"], "A", "B"], + [["C", "A"], "C", "A"], + [["A", "C"], "A", "C"], + [["B", "C"], "B", "C"], + [["A", "D"], "A", "D"], + [["A", "E"], "A", "E"], + ], + ) + def test_nullable_int_plot(self, cols, x, y): + # GH 32073 + dates = ["2008", "2009", None, "2011", "2012"] + df = DataFrame( + { + "A": [1, 2, 3, 4, 5], + "B": [1, 2, 3, 4, 5], + "C": np.array([7, 5, np.nan, 3, 2], dtype=object), + "D": pd.to_datetime(dates, format="%Y").view("i8"), + "E": pd.to_datetime(dates, format="%Y", utc=True).view("i8"), + } + ) + + _check_plot_works(df[cols].plot, x=x, y=y) + + @pytest.mark.slow + @pytest.mark.parametrize("plot", ["line", "bar", "hist", "pie"]) + def test_integer_array_plot_series(self, plot): + # GH 25587 + arr = pd.array([1, 2, 3, 4], dtype="UInt32") + + s = Series(arr) + _check_plot_works(getattr(s.plot, plot)) + + @pytest.mark.slow + @pytest.mark.parametrize( + "plot, kwargs", + [ + ["line", {}], + ["bar", {}], + ["hist", {}], + ["pie", {"y": "y"}], + ["scatter", {"x": "x", "y": "y"}], + ["hexbin", {"x": "x", "y": "y"}], + ], + ) + def test_integer_array_plot_df(self, plot, kwargs): + # GH 25587 + arr = pd.array([1, 2, 3, 4], dtype="UInt32") + df = DataFrame({"x": arr, "y": arr}) + _check_plot_works(getattr(df.plot, plot), **kwargs) + + def test_nonnumeric_exclude(self): + df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]}) + ax = df.plot() + assert len(ax.get_lines()) == 1 # B was plotted + + def test_implicit_label(self): + df = DataFrame( + 
np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"] + ) + ax = df.plot(x="a", y="b") + _check_text_labels(ax.xaxis.get_label(), "a") + + def test_donot_overwrite_index_name(self): + # GH 8494 + df = DataFrame( + np.random.default_rng(2).standard_normal((2, 2)), columns=["a", "b"] + ) + df.index.name = "NAME" + df.plot(y="b", label="LABEL") + assert df.index.name == "NAME" + + def test_plot_xy(self): + # columns.inferred_type == 'string' + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + _check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot()) + _check_data(df.plot(x=0), df.set_index("A").plot()) + _check_data(df.plot(y=0), df.B.plot()) + _check_data(df.plot(x="A", y="B"), df.set_index("A").B.plot()) + _check_data(df.plot(x="A"), df.set_index("A").plot()) + _check_data(df.plot(y="B"), df.B.plot()) + + def test_plot_xy_int_cols(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + # columns.inferred_type == 'integer' + df.columns = np.arange(1, len(df.columns) + 1) + _check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot()) + _check_data(df.plot(x=1), df.set_index(1).plot()) + _check_data(df.plot(y=1), df[1].plot()) + + def test_plot_xy_figsize_and_title(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + # figsize and title + ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8)) + _check_text_labels(ax.title, "Test") + _check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0)) + + # columns.inferred_type == 'mixed' + # TODO add MultiIndex test + + @pytest.mark.parametrize( + "input_log, expected_log", [(True, "log"), ("sym", "symlog")] + ) + def 
test_logscales(self, input_log, expected_log): + df = DataFrame({"a": np.arange(100)}, index=np.arange(100)) + + ax = df.plot(logy=input_log) + _check_ax_scales(ax, yaxis=expected_log) + assert ax.get_yscale() == expected_log + + ax = df.plot(logx=input_log) + _check_ax_scales(ax, xaxis=expected_log) + assert ax.get_xscale() == expected_log + + ax = df.plot(loglog=input_log) + _check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log) + assert ax.get_xscale() == expected_log + assert ax.get_yscale() == expected_log + + @pytest.mark.parametrize("input_param", ["logx", "logy", "loglog"]) + def test_invalid_logscale(self, input_param): + # GH: 24867 + df = DataFrame({"a": np.arange(100)}, index=np.arange(100)) + + msg = f"keyword '{input_param}' should be bool, None, or 'sym', not 'sm'" + with pytest.raises(ValueError, match=msg): + df.plot(**{input_param: "sm"}) + + msg = f"PiePlot ignores the '{input_param}' keyword" + with tm.assert_produces_warning(UserWarning, match=msg): + df.plot.pie(subplots=True, **{input_param: True}) + + def test_xcompat(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + ax = df.plot(x_compat=True) + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + _check_ticks_props(ax, xrot=30) + + def test_xcompat_plot_params(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + plotting.plot_params["xaxis.compat"] = True + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + _check_ticks_props(ax, xrot=30) + + def test_xcompat_plot_params_x_compat(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", 
periods=10, freq="B"), + ) + plotting.plot_params["x_compat"] = False + + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + msg = r"PeriodDtype\[B\] is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex) + + def test_xcompat_plot_params_context_manager(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + # useful if you're plotting a bunch together + with plotting.plot_params.use("x_compat", True): + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + _check_ticks_props(ax, xrot=30) + + def test_xcompat_plot_period(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + ax = df.plot() + lines = ax.get_lines() + assert not isinstance(lines[0].get_xdata(), PeriodIndex) + msg = r"PeriodDtype\[B\] is deprecated " + with tm.assert_produces_warning(FutureWarning, match=msg): + assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex) + _check_ticks_props(ax, xrot=0) + + def test_period_compat(self): + # GH 9012 + # period-array conversions + df = DataFrame( + np.random.default_rng(2).random((21, 2)), + index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)), + columns=["a", "b"], + ) + + df.plot() + mpl.pyplot.axhline(y=0) + + @pytest.mark.parametrize("index_dtype", [np.int64, np.float64]) + def test_unsorted_index(self, index_dtype): + df = DataFrame( + {"y": np.arange(100)}, + index=Index(np.arange(99, -1, -1), dtype=index_dtype), + dtype=np.int64, + ) + ax = df.plot() + lines = ax.get_lines()[0] + rs = lines.get_xydata() + rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y") + tm.assert_series_equal(rs, 
df.y, check_index_type=False) + + @pytest.mark.parametrize( + "df", + [ + DataFrame({"y": [0.0, 1.0, 2.0, 3.0]}, index=[1.0, 0.0, 3.0, 2.0]), + DataFrame( + {"y": [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0]}, + index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0], + ), + ], + ) + def test_unsorted_index_lims(self, df): + ax = df.plot() + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= np.nanmin(lines[0].get_data()[0]) + assert xmax >= np.nanmax(lines[0].get_data()[0]) + + def test_unsorted_index_lims_x_y(self): + df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0], "z": [91.0, 90.0, 93.0, 92.0]}) + ax = df.plot(x="z", y="y") + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= np.nanmin(lines[0].get_data()[0]) + assert xmax >= np.nanmax(lines[0].get_data()[0]) + + def test_negative_log(self): + df = -DataFrame( + np.random.default_rng(2).random((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + msg = "Log-y scales are not supported in area plot" + with pytest.raises(ValueError, match=msg): + df.plot.area(logy=True) + with pytest.raises(ValueError, match=msg): + df.plot.area(loglog=True) + + def _compare_stacked_y_cood(self, normal_lines, stacked_lines): + base = np.zeros(len(normal_lines[0].get_data()[1])) + for nl, sl in zip(normal_lines, stacked_lines): + base += nl.get_data()[1] # get y coordinates + sy = sl.get_data()[1] + tm.assert_numpy_array_equal(base, sy) + + @pytest.mark.parametrize("kind", ["line", "area"]) + @pytest.mark.parametrize("mult", [1, -1]) + def test_line_area_stacked(self, kind, mult): + df = mult * DataFrame( + np.random.default_rng(2).random((6, 4)), columns=["w", "x", "y", "z"] + ) + + ax1 = _check_plot_works(df.plot, kind=kind, stacked=False) + ax2 = _check_plot_works(df.plot, kind=kind, stacked=True) + self._compare_stacked_y_cood(ax1.lines, ax2.lines) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_line_area_stacked_sep_df(self, kind): + # each column has either 
positive or negative value + sep_df = DataFrame( + { + "w": np.random.default_rng(2).random(6), + "x": np.random.default_rng(2).random(6), + "y": -np.random.default_rng(2).random(6), + "z": -np.random.default_rng(2).random(6), + } + ) + ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False) + ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True) + self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2]) + self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:]) + + def test_line_area_stacked_mixed(self): + mixed_df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["w", "x", "y", "z"], + ) + _check_plot_works(mixed_df.plot, stacked=False) + + msg = ( + "When stacked is True, each column must be either all positive or " + "all negative. Column 'w' contains both positive and negative " + "values" + ) + with pytest.raises(ValueError, match=msg): + mixed_df.plot(stacked=True) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_line_area_stacked_positive_idx(self, kind): + df = DataFrame( + np.random.default_rng(2).random((6, 4)), columns=["w", "x", "y", "z"] + ) + # Use an index with strictly positive values, preventing + # matplotlib from warning about ignoring xlim + df2 = df.set_index(df.index + 1) + _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True) + + @pytest.mark.parametrize( + "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)] + ) + def test_line_area_nan_df(self, idx): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({"a": values1, "b": values2}, index=idx) + + ax = _check_plot_works(df.plot) + masked1 = ax.lines[0].get_ydata() + masked2 = ax.lines[1].get_ydata() + # remove nan for comparison purpose + + exp = np.array([1, 2, 3], dtype=np.float64) + tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp) + + exp = np.array([3, 2, 1], dtype=np.float64) + tm.assert_numpy_array_equal(np.delete(masked2.data, 
1), exp) + tm.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False])) + tm.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False])) + + @pytest.mark.parametrize( + "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)] + ) + def test_line_area_nan_df_stacked(self, idx): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({"a": values1, "b": values2}, index=idx) + + expected1 = np.array([1, 2, 0, 3], dtype=np.float64) + expected2 = np.array([3, 0, 2, 1], dtype=np.float64) + + ax = _check_plot_works(df.plot, stacked=True) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2) + + @pytest.mark.parametrize( + "idx", [range(4), date_range("2023-01-1", freq="D", periods=4)] + ) + @pytest.mark.parametrize("kwargs", [{}, {"stacked": False}]) + def test_line_area_nan_df_stacked_area(self, idx, kwargs): + values1 = [1, 2, np.nan, 3] + values2 = [3, np.nan, 2, 1] + df = DataFrame({"a": values1, "b": values2}, index=idx) + + expected1 = np.array([1, 2, 0, 3], dtype=np.float64) + expected2 = np.array([3, 0, 2, 1], dtype=np.float64) + + ax = _check_plot_works(df.plot.area, **kwargs) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + if kwargs: + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) + else: + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2) + + ax = _check_plot_works(df.plot.area, stacked=False) + tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1) + tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2) + + @pytest.mark.parametrize("kwargs", [{}, {"secondary_y": True}]) + def test_line_lim(self, kwargs): + df = DataFrame(np.random.default_rng(2).random((6, 3)), columns=["x", "y", "z"]) + ax = df.plot(**kwargs) + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data()[0][0] + assert xmax >= 
lines[0].get_data()[0][-1] + + def test_line_lim_subplots(self): + df = DataFrame(np.random.default_rng(2).random((6, 3)), columns=["x", "y", "z"]) + axes = df.plot(secondary_y=True, subplots=True) + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + for ax in axes: + assert hasattr(ax, "left_ax") + assert not hasattr(ax, "right_ax") + xmin, xmax = ax.get_xlim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data()[0][0] + assert xmax >= lines[0].get_data()[0][-1] + + @pytest.mark.xfail( + strict=False, + reason="2020-12-01 this has been failing periodically on the " + "ymin==0 assertion for a week or so.", + ) + @pytest.mark.parametrize("stacked", [True, False]) + def test_area_lim(self, stacked): + df = DataFrame( + np.random.default_rng(2).random((6, 4)), columns=["x", "y", "z", "four"] + ) + + neg_df = -df + + ax = _check_plot_works(df.plot.area, stacked=stacked) + xmin, xmax = ax.get_xlim() + ymin, ymax = ax.get_ylim() + lines = ax.get_lines() + assert xmin <= lines[0].get_data()[0][0] + assert xmax >= lines[0].get_data()[0][-1] + assert ymin == 0 + + ax = _check_plot_works(neg_df.plot.area, stacked=stacked) + ymin, ymax = ax.get_ylim() + assert ymax == 0 + + def test_area_sharey_dont_overwrite(self): + # GH37942 + df = DataFrame(np.random.default_rng(2).random((4, 2)), columns=["x", "y"]) + fig, (ax1, ax2) = mpl.pyplot.subplots(1, 2, sharey=True) + + df.plot(ax=ax1, kind="area") + df.plot(ax=ax2, kind="area") + + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + + @pytest.mark.parametrize("stacked", [True, False]) + def test_bar_linewidth(self, stacked): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + ax = df.plot.bar(stacked=stacked, linewidth=2) + for r in ax.patches: + assert r.get_linewidth() == 2 + + def test_bar_linewidth_subplots(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # subplots + axes = df.plot.bar(linewidth=2, subplots=True) + 
_check_axes_shape(axes, axes_num=5, layout=(5, 1)) + for ax in axes: + for r in ax.patches: + assert r.get_linewidth() == 2 + + @pytest.mark.parametrize( + "meth, dim", [("bar", "get_width"), ("barh", "get_height")] + ) + @pytest.mark.parametrize("stacked", [True, False]) + def test_bar_barwidth(self, meth, dim, stacked): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + width = 0.9 + + ax = getattr(df.plot, meth)(stacked=stacked, width=width) + for r in ax.patches: + if not stacked: + assert getattr(r, dim)() == width / len(df.columns) + else: + assert getattr(r, dim)() == width + + @pytest.mark.parametrize( + "meth, dim", [("bar", "get_width"), ("barh", "get_height")] + ) + def test_barh_barwidth_subplots(self, meth, dim): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + width = 0.9 + + axes = getattr(df.plot, meth)(width=width, subplots=True) + for ax in axes: + for r in ax.patches: + assert getattr(r, dim)() == width + + def test_bar_bottom_left_bottom(self): + df = DataFrame(np.random.default_rng(2).random((5, 5))) + ax = df.plot.bar(stacked=False, bottom=1) + result = [p.get_y() for p in ax.patches] + assert result == [1] * 25 + + ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5]) + result = [p.get_y() for p in ax.patches[:5]] + assert result == [-1, -2, -3, -4, -5] + + def test_bar_bottom_left_left(self): + df = DataFrame(np.random.default_rng(2).random((5, 5))) + ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1])) + result = [p.get_x() for p in ax.patches] + assert result == [1] * 25 + + ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5]) + result = [p.get_x() for p in ax.patches[:5]] + assert result == [1, 2, 3, 4, 5] + + def test_bar_bottom_left_subplots(self): + df = DataFrame(np.random.default_rng(2).random((5, 5))) + axes = df.plot.bar(subplots=True, bottom=-1) + for ax in axes: + result = [p.get_y() for p in ax.patches] + assert result == [-1] * 5 + + axes = df.plot.barh(subplots=True, 
left=np.array([1, 1, 1, 1, 1])) + for ax in axes: + result = [p.get_x() for p in ax.patches] + assert result == [1] * 5 + + def test_bar_nan(self): + df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]}) + ax = df.plot.bar() + expected = [10, 0, 20, 5, 10, 20, 1, 2, 3] + result = [p.get_height() for p in ax.patches] + assert result == expected + + def test_bar_nan_stacked(self): + df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]}) + ax = df.plot.bar(stacked=True) + expected = [10, 0, 20, 5, 10, 20, 1, 2, 3] + result = [p.get_height() for p in ax.patches] + assert result == expected + + result = [p.get_y() for p in ax.patches] + expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0] + assert result == expected + + @pytest.mark.parametrize("idx", [Index, pd.CategoricalIndex]) + def test_bar_categorical(self, idx): + # GH 13019 + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 5)), + index=idx(list("ABCDEF")), + columns=idx(list("abcde")), + ) + + ax = df.plot.bar() + ticks = ax.xaxis.get_ticklocs() + tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5])) + assert ax.get_xlim() == (-0.5, 5.5) + # check left-edge of bars + assert ax.patches[0].get_x() == -0.25 + assert ax.patches[-1].get_x() == 5.15 + + ax = df.plot.bar(stacked=True) + tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5])) + assert ax.get_xlim() == (-0.5, 5.5) + assert ax.patches[0].get_x() == -0.25 + assert ax.patches[-1].get_x() == 4.75 + + @pytest.mark.parametrize("x, y", [("x", "y"), (1, 2)]) + def test_plot_scatter(self, x, y): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + def test_plot_scatter_error(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + msg = 
re.escape("scatter() missing 1 required positional argument: 'y'") + with pytest.raises(TypeError, match=msg): + df.plot.scatter(x="x") + msg = re.escape("scatter() missing 1 required positional argument: 'x'") + with pytest.raises(TypeError, match=msg): + df.plot.scatter(y="y") + + def test_plot_scatter_shape(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + # GH 6951 + axes = df.plot(x="x", y="y", kind="scatter", subplots=True) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + def test_raise_error_on_datetime_time_data(self): + # GH 8113, datetime.time type is not supported by matplotlib in scatter + df = DataFrame(np.random.default_rng(2).standard_normal(10), columns=["a"]) + df["dtime"] = date_range(start="2014-01-01", freq="h", periods=10).time + msg = "must be a string or a (real )?number, not 'datetime.time'" + + with pytest.raises(TypeError, match=msg): + df.plot(kind="scatter", x="dtime", y="a") + + @pytest.mark.parametrize("x, y", [("dates", "vals"), (0, 1)]) + def test_scatterplot_datetime_data(self, x, y): + # GH 30391 + dates = date_range(start=date(2019, 1, 1), periods=12, freq="W") + vals = np.random.default_rng(2).normal(0, 1, len(dates)) + df = DataFrame({"dates": dates, "vals": vals}) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + @pytest.mark.parametrize("x, y", [("a", "b"), (0, 1)]) + @pytest.mark.parametrize("b_col", [[2, 3, 4], ["a", "b", "c"]]) + def test_scatterplot_object_data(self, b_col, x, y, infer_string): + # GH 18755 + with option_context("future.infer_string", infer_string): + df = DataFrame({"a": ["A", "B", "C"], "b": b_col}) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + @pytest.mark.parametrize("ordered", [True, False]) + @pytest.mark.parametrize( + "categories", + (["setosa", "versicolor", 
"virginica"], ["versicolor", "virginica", "setosa"]), + ) + def test_scatterplot_color_by_categorical(self, ordered, categories): + df = DataFrame( + [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]], + columns=["length", "width"], + ) + df["species"] = pd.Categorical( + ["setosa", "setosa", "virginica", "virginica", "versicolor"], + ordered=ordered, + categories=categories, + ) + ax = df.plot.scatter(x=0, y=1, c="species") + (colorbar_collection,) = ax.collections + colorbar = colorbar_collection.colorbar + + expected_ticks = np.array([0.5, 1.5, 2.5]) + result_ticks = colorbar.get_ticks() + tm.assert_numpy_array_equal(result_ticks, expected_ticks) + + expected_boundaries = np.array([0.0, 1.0, 2.0, 3.0]) + result_boundaries = colorbar._boundaries + tm.assert_numpy_array_equal(result_boundaries, expected_boundaries) + + expected_yticklabels = categories + result_yticklabels = [i.get_text() for i in colorbar.ax.get_ymajorticklabels()] + assert all(i == j for i, j in zip(result_yticklabels, expected_yticklabels)) + + @pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")]) + def test_plot_scatter_with_categorical_data(self, x, y): + # after fixing GH 18755, should be able to plot categorical data + df = DataFrame({"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])}) + + _check_plot_works(df.plot.scatter, x=x, y=y) + + @pytest.mark.parametrize("x, y, c", [("x", "y", "z"), (0, 1, 2)]) + def test_plot_scatter_with_c(self, x, y, c): + df = DataFrame( + np.random.default_rng(2).integers(low=0, high=100, size=(6, 4)), + index=list(string.ascii_letters[:6]), + columns=["x", "y", "z", "four"], + ) + + ax = df.plot.scatter(x=x, y=y, c=c) + # default to Greys + assert ax.collections[0].cmap.name == "Greys" + + assert ax.collections[0].colorbar.ax.get_ylabel() == "z" + + def test_plot_scatter_with_c_props(self): + df = DataFrame( + np.random.default_rng(2).integers(low=0, high=100, size=(6, 4)), + index=list(string.ascii_letters[:6]), + 
columns=["x", "y", "z", "four"], + ) + cm = "cubehelix" + ax = df.plot.scatter(x="x", y="y", c="z", colormap=cm) + assert ax.collections[0].cmap.name == cm + + # verify turning off colorbar works + ax = df.plot.scatter(x="x", y="y", c="z", colorbar=False) + assert ax.collections[0].colorbar is None + + # verify that we can still plot a solid color + ax = df.plot.scatter(x=0, y=1, c="red") + assert ax.collections[0].colorbar is None + _check_colors(ax.collections, facecolors=["r"]) + + def test_plot_scatter_with_c_array(self): + # Ensure that we can pass an np.array straight through to matplotlib, + # this functionality was accidentally removed previously. + # See https://github.com/pandas-dev/pandas/issues/8852 for bug report + # + # Exercise colormap path and non-colormap path as they are independent + # + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + red_rgba = [1.0, 0.0, 0.0, 1.0] + green_rgba = [0.0, 1.0, 0.0, 1.0] + rgba_array = np.array([red_rgba, green_rgba]) + ax = df.plot.scatter(x="A", y="B", c=rgba_array) + # expect the face colors of the points in the non-colormap path to be + # identical to the values we supplied, normally we'd be on shaky ground + # comparing floats for equality but here we expect them to be + # identical. + tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(), rgba_array) + # we don't test the colors of the faces in this next plot because they + # are dependent on the spring colormap, which may change its colors + # later. 
+ float_array = np.array([0.0, 1.0]) + df.plot.scatter(x="A", y="B", c=float_array, cmap="spring") + + def test_plot_scatter_with_s(self): + # this refers to GH 32904 + df = DataFrame( + np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"] + ) + + ax = df.plot.scatter(x="a", y="b", s="c") + tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes()) + + def test_plot_scatter_with_norm(self): + # added while fixing GH 45809 + df = DataFrame( + np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"] + ) + norm = mpl.colors.LogNorm() + ax = df.plot.scatter(x="a", y="b", c="c", norm=norm) + assert ax.collections[0].norm is norm + + def test_plot_scatter_without_norm(self): + # added while fixing GH 45809 + df = DataFrame( + np.random.default_rng(2).random((10, 3)) * 100, columns=["a", "b", "c"] + ) + ax = df.plot.scatter(x="a", y="b", c="c") + plot_norm = ax.collections[0].norm + color_min_max = (df.c.min(), df.c.max()) + default_norm = mpl.colors.Normalize(*color_min_max) + for value in df.c: + assert plot_norm(value) == default_norm(value) + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", + [ + {}, + {"legend": False}, + {"default_axes": True, "subplots": True}, + {"stacked": True}, + ], + ) + def test_plot_bar(self, kwargs): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + + _check_plot_works(df.plot.bar, **kwargs) + + @pytest.mark.slow + def test_plot_bar_int_col(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 15)), + index=list(string.ascii_letters[:10]), + columns=range(15), + ) + _check_plot_works(df.plot.bar) + + @pytest.mark.slow + def test_plot_bar_ticks(self): + df = DataFrame({"a": [0, 1], "b": [1, 0]}) + ax = _check_plot_works(df.plot.bar) + _check_ticks_props(ax, xrot=90) + + ax = df.plot.bar(rot=35, fontsize=10) + _check_ticks_props(ax, xrot=35, 
xlabelsize=10, ylabelsize=10) + + @pytest.mark.slow + def test_plot_barh_ticks(self): + df = DataFrame({"a": [0, 1], "b": [1, 0]}) + ax = _check_plot_works(df.plot.barh) + _check_ticks_props(ax, yrot=0) + + ax = df.plot.barh(rot=55, fontsize=11) + _check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11) + + def test_boxplot(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + + ax = _check_plot_works(df.plot.box) + _check_text_labels(ax.get_xticklabels(), labels) + tm.assert_numpy_array_equal( + ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1) + ) + assert len(ax.lines) == 7 * len(numeric_cols) + + def test_boxplot_series(self, hist_df): + df = hist_df + series = df["height"] + axes = series.plot.box(rot=40) + _check_ticks_props(axes, xrot=40, yrot=0) + + _check_plot_works(series.plot.box) + + def test_boxplot_series_positions(self, hist_df): + df = hist_df + positions = np.array([1, 6, 7]) + ax = df.plot.box(positions=positions) + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + _check_text_labels(ax.get_xticklabels(), labels) + tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions) + assert len(ax.lines) == 7 * len(numeric_cols) + + def test_boxplot_vertical(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + + # if horizontal, yticklabels are rotated + ax = df.plot.box(rot=50, fontsize=8, vert=False) + _check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8) + _check_text_labels(ax.get_yticklabels(), labels) + assert len(ax.lines) == 7 * len(numeric_cols) + + @pytest.mark.filterwarnings("ignore:Attempt:UserWarning") + def test_boxplot_vertical_subplots(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + axes = _check_plot_works( + df.plot.box, + 
default_axes=True, + subplots=True, + vert=False, + logx=True, + ) + _check_axes_shape(axes, axes_num=3, layout=(1, 3)) + _check_ax_scales(axes, xaxis="log") + for ax, label in zip(axes, labels): + _check_text_labels(ax.get_yticklabels(), [label]) + assert len(ax.lines) == 7 + + def test_boxplot_vertical_positions(self, hist_df): + df = hist_df + numeric_cols = df._get_numeric_data().columns + labels = [pprint_thing(c) for c in numeric_cols] + positions = np.array([3, 2, 8]) + ax = df.plot.box(positions=positions, vert=False) + _check_text_labels(ax.get_yticklabels(), labels) + tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions) + assert len(ax.lines) == 7 * len(numeric_cols) + + def test_boxplot_return_type_invalid(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + msg = "return_type must be {None, 'axes', 'dict', 'both'}" + with pytest.raises(ValueError, match=msg): + df.plot.box(return_type="not_a_type") + + @pytest.mark.parametrize("return_type", ["dict", "axes", "both"]) + def test_boxplot_return_type_invalid_type(self, return_type): + df = DataFrame( + np.random.default_rng(2).standard_normal((6, 4)), + index=list(string.ascii_letters[:6]), + columns=["one", "two", "three", "four"], + ) + result = df.plot.box(return_type=return_type) + _check_box_return_type(result, return_type) + + def test_kde_df(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))) + ax = _check_plot_works(df.plot, kind="kde") + expected = [pprint_thing(c) for c in df.columns] + _check_legend_labels(ax, labels=expected) + _check_ticks_props(ax, xrot=0) + + def test_kde_df_rot(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + ax = df.plot(kind="kde", rot=20, fontsize=5) + _check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5) + + def 
test_kde_df_subplots(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + axes = _check_plot_works( + df.plot, + default_axes=True, + kind="kde", + subplots=True, + ) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + + def test_kde_df_logy(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + axes = df.plot(kind="kde", logy=True, subplots=True) + _check_ax_scales(axes, yaxis="log") + + def test_kde_missing_vals(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).uniform(size=(100, 4))) + df.loc[0, 0] = np.nan + _check_plot_works(df.plot, kind="kde") + + def test_hist_df(self): + df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))) + + ax = _check_plot_works(df.plot.hist) + expected = [pprint_thing(c) for c in df.columns] + _check_legend_labels(ax, labels=expected) + + axes = _check_plot_works( + df.plot.hist, + default_axes=True, + subplots=True, + logy=True, + ) + _check_axes_shape(axes, axes_num=4, layout=(4, 1)) + _check_ax_scales(axes, yaxis="log") + + def test_hist_df_series(self): + series = Series(np.random.default_rng(2).random(10)) + axes = series.plot.hist(rot=40) + _check_ticks_props(axes, xrot=40, yrot=0) + + def test_hist_df_series_cumulative_density(self): + from matplotlib.patches import Rectangle + + series = Series(np.random.default_rng(2).random(10)) + ax = series.plot.hist(cumulative=True, bins=4, density=True) + # height of last bin (index 5) must be 1.0 + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + tm.assert_almost_equal(rects[-1].get_height(), 1.0) + + def test_hist_df_series_cumulative(self): + from matplotlib.patches import Rectangle + + series = Series(np.random.default_rng(2).random(10)) + ax = series.plot.hist(cumulative=True, bins=4) + rects = [x for x in ax.get_children() if isinstance(x, Rectangle)] + + tm.assert_almost_equal(rects[-2].get_height(), 10.0) + 
+ def test_hist_df_orientation(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + # if horizontal, yticklabels are rotated + axes = df.plot.hist(rot=50, fontsize=8, orientation="horizontal") + _check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8) + + @pytest.mark.parametrize( + "weights", [0.1 * np.ones(shape=(100,)), 0.1 * np.ones(shape=(100, 2))] + ) + def test_hist_weights(self, weights): + # GH 33173 + + df = DataFrame( + dict(zip(["A", "B"], np.random.default_rng(2).standard_normal((2, 100)))) + ) + + ax1 = _check_plot_works(df.plot, kind="hist", weights=weights) + ax2 = _check_plot_works(df.plot, kind="hist") + + patch_height_with_weights = [patch.get_height() for patch in ax1.patches] + + # original heights with no weights, and we manually multiply with example + # weights, so after multiplication, they should be almost same + expected_patch_height = [0.1 * patch.get_height() for patch in ax2.patches] + + tm.assert_almost_equal(patch_height_with_weights, expected_patch_height) + + def _check_box_coord( + self, + patches, + expected_y=None, + expected_h=None, + expected_x=None, + expected_w=None, + ): + result_y = np.array([p.get_y() for p in patches]) + result_height = np.array([p.get_height() for p in patches]) + result_x = np.array([p.get_x() for p in patches]) + result_width = np.array([p.get_width() for p in patches]) + # dtype is depending on above values, no need to check + + if expected_y is not None: + tm.assert_numpy_array_equal(result_y, expected_y, check_dtype=False) + if expected_h is not None: + tm.assert_numpy_array_equal(result_height, expected_h, check_dtype=False) + if expected_x is not None: + tm.assert_numpy_array_equal(result_x, expected_x, check_dtype=False) + if expected_w is not None: + tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False) + + @pytest.mark.parametrize( + "data", + [ + { + "A": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])), + "B": np.repeat(np.array([1, 
2, 3, 4, 5]), np.array([8, 8, 8, 8, 8])), + "C": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])), + }, + { + "A": np.repeat( + np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6]) + ), + "B": np.repeat( + np.array([1, np.nan, 2, 3, 4, 5]), np.array([8, 3, 8, 8, 8, 8]) + ), + "C": np.repeat( + np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10]) + ), + }, + ], + ) + def test_hist_df_coord(self, data): + df = DataFrame(data) + + ax = df.plot.hist(bins=5) + self._check_box_coord( + ax.patches[:5], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([6, 7, 8, 9, 10]), + ) + + ax = df.plot.hist(bins=5, stacked=True) + self._check_box_coord( + ax.patches[:5], + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_y=np.array([10, 9, 8, 7, 6]), + expected_h=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_y=np.array([18, 17, 16, 15, 14]), + expected_h=np.array([6, 7, 8, 9, 10]), + ) + + axes = df.plot.hist(bins=5, stacked=True, subplots=True) + self._check_box_coord( + axes[0].patches, + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + axes[1].patches, + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + axes[2].patches, + expected_y=np.array([0, 0, 0, 0, 0]), + expected_h=np.array([6, 7, 8, 9, 10]), + ) + + # horizontal + ax = df.plot.hist(bins=5, orientation="horizontal") + self._check_box_coord( + ax.patches[:5], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + 
ax.patches[5:10], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10]), + ) + + ax = df.plot.hist(bins=5, stacked=True, orientation="horizontal") + self._check_box_coord( + ax.patches[:5], + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + ax.patches[5:10], + expected_x=np.array([10, 9, 8, 7, 6]), + expected_w=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + ax.patches[10:], + expected_x=np.array([18, 17, 16, 15, 14]), + expected_w=np.array([6, 7, 8, 9, 10]), + ) + + axes = df.plot.hist( + bins=5, stacked=True, subplots=True, orientation="horizontal" + ) + self._check_box_coord( + axes[0].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([10, 9, 8, 7, 6]), + ) + self._check_box_coord( + axes[1].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([8, 8, 8, 8, 8]), + ) + self._check_box_coord( + axes[2].patches, + expected_x=np.array([0, 0, 0, 0, 0]), + expected_w=np.array([6, 7, 8, 9, 10]), + ) + + def test_plot_int_columns(self): + df = DataFrame(np.random.default_rng(2).standard_normal((100, 4))).cumsum() + _check_plot_works(df.plot, legend=True) + + @pytest.mark.parametrize( + "markers", + [ + {0: "^", 1: "+", 2: "o"}, + {0: "^", 1: "+"}, + ["^", "+", "o"], + ["^", "+"], + ], + ) + def test_style_by_column(self, markers): + import matplotlib.pyplot as plt + + fig = plt.gcf() + fig.clf() + fig.add_subplot(111) + df = DataFrame(np.random.default_rng(2).standard_normal((10, 3))) + ax = df.plot(style=markers) + for idx, line in enumerate(ax.get_lines()[: len(markers)]): + assert line.get_marker() == markers[idx] + + def test_line_label_none(self): + s = Series([1, 2]) + ax = s.plot() + assert ax.get_legend() is None + + ax = s.plot(legend=True) + assert ax.get_legend().get_texts()[0].get_text() == "" + + 
@pytest.mark.parametrize( + "props, expected", + [ + ("boxprops", "boxes"), + ("whiskerprops", "whiskers"), + ("capprops", "caps"), + ("medianprops", "medians"), + ], + ) + def test_specified_props_kwd_plot_box(self, props, expected): + # GH 30346 + df = DataFrame({k: np.random.default_rng(2).random(100) for k in "ABC"}) + kwd = {props: {"color": "C1"}} + result = df.plot.box(return_type="dict", **kwd) + + assert result[expected][0].get_color() == "C1" + + def test_unordered_ts(self): + # GH#2609, GH#55906 + index = [date(2012, 10, 1), date(2012, 9, 1), date(2012, 8, 1)] + values = [3.0, 2.0, 1.0] + df = DataFrame( + np.array(values), + index=index, + columns=["test"], + ) + ax = df.plot() + xticks = ax.lines[0].get_xdata() + tm.assert_numpy_array_equal(xticks, np.array(index, dtype=object)) + ydata = ax.lines[0].get_ydata() + tm.assert_numpy_array_equal(ydata, np.array(values)) + + # even though we don't sort the data before passing it to matplotlib, + # the ticks are sorted + xticks = ax.xaxis.get_ticklabels() + xlocs = [x.get_position()[0] for x in xticks] + assert Index(xlocs).is_monotonic_increasing + xlabels = [x.get_text() for x in xticks] + assert pd.to_datetime(xlabels, format="%Y-%m-%d").is_monotonic_increasing + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_kind_both_ways(self, kind): + pytest.importorskip("scipy") + df = DataFrame({"x": [1, 2, 3]}) + df.plot(kind=kind) + getattr(df.plot, kind)() + + @pytest.mark.parametrize("kind", ["scatter", "hexbin"]) + def test_kind_both_ways_x_y(self, kind): + pytest.importorskip("scipy") + df = DataFrame({"x": [1, 2, 3]}) + df.plot("x", "x", kind=kind) + getattr(df.plot, kind)("x", "x") + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._common_kinds) + def test_all_invalid_plot_data(self, kind): + df = DataFrame(list("abcd")) + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df.plot(kind=kind) + + @pytest.mark.parametrize( + "kind", 
list(plotting.PlotAccessor._common_kinds) + ["area"] + ) + def test_partially_invalid_plot_data_numeric(self, kind): + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + dtype=object, + ) + df[np.random.default_rng(2).random(df.shape[0]) > 0.5] = "a" + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + df.plot(kind=kind) + + def test_invalid_kind(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + msg = "invalid_plot_kind is not a valid plot kind" + with pytest.raises(ValueError, match=msg): + df.plot(kind="invalid_plot_kind") + + @pytest.mark.parametrize( + "x,y,lbl", + [ + (["B", "C"], "A", "a"), + (["A"], ["B", "C"], ["b", "c"]), + ], + ) + def test_invalid_xy_args(self, x, y, lbl): + # GH 18671, 19699 allows y to be list-like but not x + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + with pytest.raises(ValueError, match="x must be a label or position"): + df.plot(x=x, y=y, label=lbl) + + def test_bad_label(self): + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + msg = "label should be list-like and same length as y" + with pytest.raises(ValueError, match=msg): + df.plot(x="A", y=["B", "C"], label="bad_label") + + @pytest.mark.parametrize("x,y", [("A", "B"), (["A"], "B")]) + def test_invalid_xy_args_dup_cols(self, x, y): + # GH 18671, 19699 allows y to be list-like but not x + df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list("AAB")) + with pytest.raises(ValueError, match="x must be a label or position"): + df.plot(x=x, y=y) + + @pytest.mark.parametrize( + "x,y,lbl,colors", + [ + ("A", ["B"], ["b"], ["red"]), + ("A", ["B", "C"], ["b", "c"], ["red", "blue"]), + (0, [1, 2], ["bokeh", "cython"], ["green", "yellow"]), + ], + ) + def test_y_listlike(self, x, y, lbl, colors): + # GH 19699: tests list-like y and verifies lbls & colors + df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}) + _check_plot_works(df.plot, x="A", y=y, label=lbl) + + ax = df.plot(x=x, y=y, 
label=lbl, color=colors) + assert len(ax.lines) == len(y) + _check_colors(ax.get_lines(), linecolors=colors) + + @pytest.mark.parametrize("x,y,colnames", [(0, 1, ["A", "B"]), (1, 0, [0, 1])]) + def test_xy_args_integer(self, x, y, colnames): + # GH 20056: tests integer args for xy and checks col names + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + df.columns = colnames + _check_plot_works(df.plot, x=x, y=y) + + def test_hexbin_basic(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + + ax = df.plot.hexbin(x="A", y="B", gridsize=10) + # TODO: need better way to test. This just does existence. + assert len(ax.collections) == 1 + + def test_hexbin_basic_subplots(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + # GH 6951 + axes = df.plot.hexbin(x="A", y="B", subplots=True) + # hexbin should have 2 axes in the figure, 1 for plotting and another + # is colorbar + assert len(axes[0].figure.axes) == 2 + # return value is single axes + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + + @pytest.mark.parametrize("reduce_C", [None, np.std]) + def test_hexbin_with_c(self, reduce_C): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + + ax = df.plot.hexbin(x="A", y="B", C="C", reduce_C_function=reduce_C) + assert len(ax.collections) == 1 + + @pytest.mark.parametrize( + "kwargs, expected", + [ + ({}, "BuGn"), # default cmap + ({"colormap": "cubehelix"}, "cubehelix"), + ({"cmap": "YlGn"}, "YlGn"), + ], + ) + def test_hexbin_cmap(self, kwargs, expected): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": 
np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + ax = df.plot.hexbin(x="A", y="B", **kwargs) + assert ax.collections[0].cmap.name == expected + + def test_pie_df_err(self): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + msg = "pie requires either y column or 'subplots=True'" + with pytest.raises(ValueError, match=msg): + df.plot.pie() + + @pytest.mark.parametrize("y", ["Y", 2]) + def test_pie_df(self, y): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + ax = _check_plot_works(df.plot.pie, y=y) + _check_text_labels(ax.texts, df.index) + + def test_pie_df_subplots(self): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + axes = _check_plot_works( + df.plot.pie, + default_axes=True, + subplots=True, + ) + assert len(axes) == len(df.columns) + for ax in axes: + _check_text_labels(ax.texts, df.index) + for ax, ylabel in zip(axes, df.columns): + assert ax.get_ylabel() == ylabel + + def test_pie_df_labels_colors(self): + df = DataFrame( + np.random.default_rng(2).random((5, 3)), + columns=["X", "Y", "Z"], + index=["a", "b", "c", "d", "e"], + ) + labels = ["A", "B", "C", "D", "E"] + color_args = ["r", "g", "b", "c", "m"] + axes = _check_plot_works( + df.plot.pie, + default_axes=True, + subplots=True, + labels=labels, + colors=color_args, + ) + assert len(axes) == len(df.columns) + + for ax in axes: + _check_text_labels(ax.texts, labels) + _check_colors(ax.patches, facecolors=color_args) + + def test_pie_df_nan(self): + df = DataFrame(np.random.default_rng(2).random((4, 4))) + for i in range(4): + df.iloc[i, i] = np.nan + _, axes = mpl.pyplot.subplots(ncols=4) + + # GH 37668 + kwargs = {"normalize": True} + + with tm.assert_produces_warning(None): + 
df.plot.pie(subplots=True, ax=axes, legend=True, **kwargs) + + base_expected = ["0", "1", "2", "3"] + for i, ax in enumerate(axes): + expected = list(base_expected) # force copy + expected[i] = "" + result = [x.get_text() for x in ax.texts] + assert result == expected + + # legend labels + # NaN's not included in legend with subplots + # see https://github.com/pandas-dev/pandas/issues/8390 + result_labels = [x.get_text() for x in ax.get_legend().get_texts()] + expected_labels = base_expected[:i] + base_expected[i + 1 :] + assert result_labels == expected_labels + + @pytest.mark.slow + @pytest.mark.parametrize( + "kwargs", + [ + {"logy": True}, + {"logx": True, "logy": True}, + {"loglog": True}, + ], + ) + def test_errorbar_plot(self, kwargs): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4} + df_err = DataFrame(d_err) + + # check line plots + ax = _check_plot_works(df.plot, yerr=df_err, **kwargs) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + def test_errorbar_plot_bar(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4} + df_err = DataFrame(d_err) + ax = _check_plot_works( + (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True + ) + _check_has_errorbars(ax, xerr=2, yerr=2) + + @pytest.mark.slow + def test_errorbar_plot_yerr_array(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + # yerr is raw error values + ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4) + _check_has_errorbars(ax, xerr=0, yerr=1) + + ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("yerr", ["yerr", "誤差"]) + def test_errorbar_plot_column_name(self, yerr): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + df[yerr] = np.ones(12) * 
0.2 + + ax = _check_plot_works(df.plot, yerr=yerr) + _check_has_errorbars(ax, xerr=0, yerr=2) + + ax = _check_plot_works(df.plot, y="y", x="x", yerr=yerr) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + def test_errorbar_plot_external_valueerror(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + with tm.external_error_raised(ValueError): + df.plot(yerr=np.random.default_rng(2).standard_normal(11)) + + @pytest.mark.slow + def test_errorbar_plot_external_typeerror(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + df_err = DataFrame({"x": ["zzz"] * 12, "y": ["zzz"] * 12}) + with tm.external_error_raised(TypeError): + df.plot(yerr=df_err) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + @pytest.mark.parametrize( + "y_err", + [ + Series(np.ones(12) * 0.2, name="x"), + DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}), + ], + ) + def test_errorbar_plot_different_yerr(self, kind, y_err): + df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)}) + + ax = _check_plot_works(df.plot, yerr=y_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + @pytest.mark.parametrize( + "y_err, x_err", + [ + ( + DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}), + DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}), + ), + (Series(np.ones(12) * 0.2, name="x"), Series(np.ones(12) * 0.2, name="x")), + (0.2, 0.2), + ], + ) + def test_errorbar_plot_different_yerr_xerr(self, kind, y_err, x_err): + df = DataFrame({"x": np.arange(12), "y": np.arange(12, 0, -1)}) + ax = _check_plot_works(df.plot, yerr=y_err, xerr=x_err, kind=kind) + _check_has_errorbars(ax, xerr=2, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + def test_errorbar_plot_different_yerr_xerr_subplots(self, kind): + df = DataFrame({"x": np.arange(12), 
"y": np.arange(12, 0, -1)}) + df_err = DataFrame({"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}) + axes = _check_plot_works( + df.plot, + default_axes=True, + yerr=df_err, + xerr=df_err, + subplots=True, + kind=kind, + ) + _check_has_errorbars(axes, xerr=1, yerr=1) + + @pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError) + def test_errorbar_plot_iterator(self): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + + # yerr is iterator + ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df))) + _check_has_errorbars(ax, xerr=0, yerr=2) + + def test_errorbar_with_integer_column_names(self): + # test with integer column names + df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 2)))) + df_err = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 2)))) + ax = _check_plot_works(df.plot, yerr=df_err) + _check_has_errorbars(ax, xerr=0, yerr=2) + ax = _check_plot_works(df.plot, y=0, yerr=1) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["line", "bar"]) + def test_errorbar_with_partial_columns_kind(self, kind): + df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 3)))) + df_err = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 2))), columns=[0, 2] + ) + ax = _check_plot_works(df.plot, yerr=df_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + def test_errorbar_with_partial_columns_dti(self): + df = DataFrame(np.abs(np.random.default_rng(2).standard_normal((10, 3)))) + df_err = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((10, 2))), columns=[0, 2] + ) + ix = date_range("1/1/2000", periods=10, freq="ME") + df.set_index(ix, inplace=True) + df_err.set_index(ix, inplace=True) + ax = _check_plot_works(df.plot, yerr=df_err, kind="line") + _check_has_errorbars(ax, xerr=0, yerr=2) + + @pytest.mark.slow + @pytest.mark.parametrize("err_box", [lambda x: x, DataFrame]) + 
def test_errorbar_with_partial_columns_box(self, err_box): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + df = DataFrame(d) + err = err_box({"x": np.ones(12) * 0.2, "z": np.ones(12) * 0.4}) + ax = _check_plot_works(df.plot, yerr=err) + _check_has_errorbars(ax, xerr=0, yerr=1) + + @pytest.mark.parametrize("kind", ["line", "bar", "barh"]) + def test_errorbar_timeseries(self, kind): + d = {"x": np.arange(12), "y": np.arange(12, 0, -1)} + d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4} + + # check time-series plots + ix = date_range("1/1/2000", "1/1/2001", freq="ME") + tdf = DataFrame(d, index=ix) + tdf_err = DataFrame(d_err, index=ix) + + ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=1) + + ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=1) + + ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind) + _check_has_errorbars(ax, xerr=0, yerr=2) + + axes = _check_plot_works( + tdf.plot, + default_axes=True, + kind=kind, + yerr=tdf_err, + subplots=True, + ) + _check_has_errorbars(axes, xerr=0, yerr=1) + + def test_errorbar_asymmetrical(self): + err = np.random.default_rng(2).random((3, 2, 5)) + + # each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]... 
+ df = DataFrame(np.arange(15).reshape(3, 5)).T + + ax = df.plot(yerr=err, xerr=err / 2) + + yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1] + expected_0_0 = err[0, :, 0] * np.array([-1, 1]) + tm.assert_almost_equal(yerr_0_0, expected_0_0) + + msg = re.escape( + "Asymmetrical error bars should be provided with the shape (3, 2, 5)" + ) + with pytest.raises(ValueError, match=msg): + df.plot(yerr=err.T) + + def test_table(self): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + _check_plot_works(df.plot, table=True) + _check_plot_works(df.plot, table=df) + + # GH 35945 UserWarning + with tm.assert_produces_warning(None): + ax = df.plot() + assert len(ax.tables) == 0 + plotting.table(ax, df.T) + assert len(ax.tables) == 1 + + def test_errorbar_scatter(self): + df = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((5, 2))), + index=range(5), + columns=["x", "y"], + ) + df_err = DataFrame( + np.abs(np.random.default_rng(2).standard_normal((5, 2))) / 5, + index=range(5), + columns=["x", "y"], + ) + + ax = _check_plot_works(df.plot.scatter, x="x", y="y") + _check_has_errorbars(ax, xerr=0, yerr=0) + ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err) + _check_has_errorbars(ax, xerr=1, yerr=0) + + ax = _check_plot_works(df.plot.scatter, x="x", y="y", yerr=df_err) + _check_has_errorbars(ax, xerr=0, yerr=1) + ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err, yerr=df_err) + _check_has_errorbars(ax, xerr=1, yerr=1) + + def test_errorbar_scatter_color(self): + def _check_errorbar_color(containers, expected, has_err="has_xerr"): + lines = [] + errs = next(c.lines for c in ax.containers if getattr(c, has_err, False)) + for el in errs: + if is_list_like(el): + lines.extend(el) + else: + lines.append(el) + err_lines = [x for x in lines if x in ax.collections] + _check_colors(err_lines, linecolors=np.array([expected] * len(err_lines))) + + # GH 8081 + df = DataFrame( + 
np.abs(np.random.default_rng(2).standard_normal((10, 5))), + columns=["a", "b", "c", "d", "e"], + ) + ax = df.plot.scatter(x="a", y="b", xerr="d", yerr="e", c="red") + _check_has_errorbars(ax, xerr=1, yerr=1) + _check_errorbar_color(ax.containers, "red", has_err="has_xerr") + _check_errorbar_color(ax.containers, "red", has_err="has_yerr") + + ax = df.plot.scatter(x="a", y="b", yerr="e", color="green") + _check_has_errorbars(ax, xerr=0, yerr=1) + _check_errorbar_color(ax.containers, "green", has_err="has_yerr") + + def test_scatter_unknown_colormap(self): + # GH#48726 + df = DataFrame({"a": [1, 2, 3], "b": 4}) + with pytest.raises((ValueError, KeyError), match="'unknown' is not a"): + df.plot(x="a", y="b", colormap="unknown", kind="scatter") + + def test_sharex_and_ax(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + # the axis in fig.get_axis() are sorted differently than pandas + # expected them, so make sure that only the right ones are removed + import matplotlib.pyplot as plt + + plt.close("all") + gs, axes = _generate_4_axes_via_gridspec() + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + + def _check(axes): + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[0], axes[2]]: + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + for ax in [axes[1], axes[3]]: + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax, sharex=True) + gs.tight_layout(plt.gcf()) + _check(axes) + plt.close("all") + + gs, axes = _generate_4_axes_via_gridspec() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True) + _check(axes) + + def test_sharex_false_and_ax(self): + # 
https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + # the axis in fig.get_axis() are sorted differently than pandas + # expected them, so make sure that only the right ones are removed + import matplotlib.pyplot as plt + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + gs, axes = _generate_4_axes_via_gridspec() + # without sharex, no labels should be touched! + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax) + + gs.tight_layout(plt.gcf()) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + def test_sharey_and_ax(self): + # https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + # the axis in fig.get_axis() are sorted differently than pandas + # expected them, so make sure that only the right ones are removed + import matplotlib.pyplot as plt + + gs, axes = _generate_4_axes_via_gridspec() + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + + def _check(axes): + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + for ax in [axes[0], axes[1]]: + _check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[2], axes[3]]: + _check_visible(ax.get_yticklabels(), visible=False) + + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax, sharey=True) + gs.tight_layout(plt.gcf()) + _check(axes) + plt.close("all") + + gs, axes = _generate_4_axes_via_gridspec() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharey=True) + + gs.tight_layout(plt.gcf()) + _check(axes) + + def test_sharey_and_ax_tight(self): + # 
https://github.com/pandas-dev/pandas/issues/9737 using gridspec, + import matplotlib.pyplot as plt + + df = DataFrame( + { + "a": [1, 2, 3, 4, 5, 6], + "b": [1, 2, 3, 4, 5, 6], + "c": [1, 2, 3, 4, 5, 6], + "d": [1, 2, 3, 4, 5, 6], + } + ) + gs, axes = _generate_4_axes_via_gridspec() + # without sharex, no labels should be touched! + for ax in axes: + df.plot(x="a", y="b", title="title", ax=ax) + + gs.tight_layout(plt.gcf()) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + @pytest.mark.parametrize("kind", plotting.PlotAccessor._all_kinds) + def test_memory_leak(self, kind): + """Check that every plot type gets properly collected.""" + pytest.importorskip("scipy") + args = {} + if kind in ["hexbin", "scatter", "pie"]: + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + args = {"x": "A", "y": "B"} + elif kind == "area": + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ).abs() + else: + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + + # Use a weakref so we can see if the object gets collected without + # also preventing it from being collected + ref = weakref.ref(df.plot(kind=kind, **args)) + + # have matplotlib delete all the figures + plt.close("all") + # force a garbage collection + gc.collect() + assert ref() is None + + def test_df_gridspec_patterns_vert_horiz(self): + # GH 10819 + from matplotlib import gridspec + import matplotlib.pyplot as plt + + ts = Series( + 
np.random.default_rng(2).standard_normal(10), + index=date_range("1/1/2000", periods=10), + ) + + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=ts.index, + columns=list("AB"), + ) + + def _get_vertical_grid(): + gs = gridspec.GridSpec(3, 1) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:2, :]) + ax2 = fig.add_subplot(gs[2, :]) + return ax1, ax2 + + def _get_horizontal_grid(): + gs = gridspec.GridSpec(1, 3) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:, :2]) + ax2 = fig.add_subplot(gs[:, 2]) + return ax1, ax2 + + for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]: + ax1 = ts.plot(ax=ax1) + assert len(ax1.lines) == 1 + ax2 = df.plot(ax=ax2) + assert len(ax2.lines) == 2 + for ax in [ax1, ax2]: + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + # subplots=True + for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]: + axes = df.plot(subplots=True, ax=[ax1, ax2]) + assert len(ax1.lines) == 1 + assert len(ax2.lines) == 1 + for ax in axes: + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + # vertical / subplots / sharex=True / sharey=True + ax1, ax2 = _get_vertical_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True) + assert len(axes[0].lines) == 1 + assert len(axes[1].lines) == 1 + for ax in [ax1, ax2]: + # yaxis are visible because there is only one column + _check_visible(ax.get_yticklabels(), visible=True) + # xaxis of axes0 (top) are hidden + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[0].get_xticklabels(minor=True), visible=False) + _check_visible(axes[1].get_xticklabels(), visible=True) + 
_check_visible(axes[1].get_xticklabels(minor=True), visible=True) + plt.close("all") + + # horizontal / subplots / sharex=True / sharey=True + ax1, ax2 = _get_horizontal_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True) + assert len(axes[0].lines) == 1 + assert len(axes[1].lines) == 1 + _check_visible(axes[0].get_yticklabels(), visible=True) + # yaxis of axes1 (right) are hidden + _check_visible(axes[1].get_yticklabels(), visible=False) + for ax in [ax1, ax2]: + # xaxis are visible because there is only one column + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + def test_df_gridspec_patterns_boxed(self): + # GH 10819 + from matplotlib import gridspec + import matplotlib.pyplot as plt + + ts = Series( + np.random.default_rng(2).standard_normal(10), + index=date_range("1/1/2000", periods=10), + ) + + # boxed + def _get_boxed_grid(): + gs = gridspec.GridSpec(3, 3) + fig = plt.figure() + ax1 = fig.add_subplot(gs[:2, :2]) + ax2 = fig.add_subplot(gs[:2, 2]) + ax3 = fig.add_subplot(gs[2, :2]) + ax4 = fig.add_subplot(gs[2, 2]) + return ax1, ax2, ax3, ax4 + + axes = _get_boxed_grid() + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=ts.index, + columns=list("ABCD"), + ) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + assert len(ax.lines) == 1 + # axis are visible because these are not shared + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + # subplots / sharex=True / sharey=True + axes = _get_boxed_grid() + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True) + for ax in axes: + assert len(ax.lines) == 1 + for ax in [axes[0], axes[2]]: # left column + 
_check_visible(ax.get_yticklabels(), visible=True) + for ax in [axes[1], axes[3]]: # right column + _check_visible(ax.get_yticklabels(), visible=False) + for ax in [axes[0], axes[1]]: # top row + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + for ax in [axes[2], axes[3]]: # bottom row + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + plt.close("all") + + def test_df_grid_settings(self): + # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792 + _check_grid_settings( + DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}), + plotting.PlotAccessor._dataframe_kinds, + kws={"x": "a", "y": "b"}, + ) + + def test_plain_axes(self): + # supplied ax itself is a SubplotAxes, but figure contains also + # a plain Axes object (GH11556) + fig, ax = mpl.pyplot.subplots() + fig.add_axes([0.2, 0.2, 0.2, 0.2]) + Series(np.random.default_rng(2).random(10)).plot(ax=ax) + + def test_plain_axes_df(self): + # supplied ax itself is a plain Axes, but because the cmap keyword + # a new ax is created for the colorbar -> also multiples axes (GH11520) + df = DataFrame( + { + "a": np.random.default_rng(2).standard_normal(8), + "b": np.random.default_rng(2).standard_normal(8), + } + ) + fig = mpl.pyplot.figure() + ax = fig.add_axes((0, 0, 1, 1)) + df.plot(kind="scatter", ax=ax, x="a", y="b", c="a", cmap="hsv") + + def test_plain_axes_make_axes_locatable(self): + # other examples + fig, ax = mpl.pyplot.subplots() + from mpl_toolkits.axes_grid1 import make_axes_locatable + + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size="5%", pad=0.05) + Series(np.random.default_rng(2).random(10)).plot(ax=ax) + Series(np.random.default_rng(2).random(10)).plot(ax=cax) + + def test_plain_axes_make_inset_axes(self): + fig, ax = mpl.pyplot.subplots() + from mpl_toolkits.axes_grid1.inset_locator import inset_axes + + iax = inset_axes(ax, width="30%", 
height=1.0, loc=3) + Series(np.random.default_rng(2).random(10)).plot(ax=ax) + Series(np.random.default_rng(2).random(10)).plot(ax=iax) + + @pytest.mark.parametrize("method", ["line", "barh", "bar"]) + def test_secondary_axis_font_size(self, method): + # GH: 12565 + df = ( + DataFrame( + np.random.default_rng(2).standard_normal((15, 2)), columns=list("AB") + ) + .assign(C=lambda df: df.B.cumsum()) + .assign(D=lambda df: df.C * 1.1) + ) + + fontsize = 20 + sy = ["C", "D"] + + kwargs = {"secondary_y": sy, "fontsize": fontsize, "mark_right": True} + ax = getattr(df.plot, method)(**kwargs) + _check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize) + + def test_x_string_values_ticks(self): + # Test if string plot index have a fixed xtick position + # GH: 7612, GH: 22334 + df = DataFrame( + { + "sales": [3, 2, 3], + "visits": [20, 42, 28], + "day": ["Monday", "Tuesday", "Wednesday"], + } + ) + ax = df.plot.area(x="day") + ax.set_xlim(-1, 3) + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + labels_position = dict(zip(xticklabels, ax.get_xticks())) + # Testing if the label stayed at the right position + assert labels_position["Monday"] == 0.0 + assert labels_position["Tuesday"] == 1.0 + assert labels_position["Wednesday"] == 2.0 + + def test_x_multiindex_values_ticks(self): + # Test if multiindex plot index have a fixed xtick position + # GH: 15912 + index = MultiIndex.from_product([[2012, 2013], [1, 2]]) + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 2)), + columns=["A", "B"], + index=index, + ) + ax = df.plot() + ax.set_xlim(-1, 4) + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + labels_position = dict(zip(xticklabels, ax.get_xticks())) + # Testing if the label stayed at the right position + assert labels_position["(2012, 1)"] == 0.0 + assert labels_position["(2012, 2)"] == 1.0 + assert labels_position["(2013, 1)"] == 2.0 + assert labels_position["(2013, 2)"] == 3.0 + + @pytest.mark.parametrize("kind", ["line", "area"]) + def 
test_xlim_plot_line(self, kind): + # test if xlim is set correctly in plot.line and plot.area + # GH 27686 + df = DataFrame([2, 4], index=[1, 2]) + ax = df.plot(kind=kind) + xlims = ax.get_xlim() + assert xlims[0] < 1 + assert xlims[1] > 2 + + def test_xlim_plot_line_correctly_in_mixed_plot_type(self): + # test if xlim is set correctly when ax contains multiple different kinds + # of plots, GH 27686 + fig, ax = mpl.pyplot.subplots() + + indexes = ["k1", "k2", "k3", "k4"] + df = DataFrame( + { + "s1": [1000, 2000, 1500, 2000], + "s2": [900, 1400, 2000, 3000], + "s3": [1500, 1500, 1600, 1200], + "secondary_y": [1, 3, 4, 3], + }, + index=indexes, + ) + df[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False) + df[["secondary_y"]].plot(ax=ax, secondary_y=True) + + xlims = ax.get_xlim() + assert xlims[0] < 0 + assert xlims[1] > 3 + + # make sure axis labels are plotted correctly as well + xticklabels = [t.get_text() for t in ax.get_xticklabels()] + assert xticklabels == indexes + + def test_plot_no_rows(self): + # GH 27758 + df = DataFrame(columns=["foo"], dtype=int) + assert df.empty + ax = df.plot() + assert len(ax.get_lines()) == 1 + line = ax.get_lines()[0] + assert len(line.get_xdata()) == 0 + assert len(line.get_ydata()) == 0 + + def test_plot_no_numeric_data(self): + df = DataFrame(["a", "b", "c"]) + with pytest.raises(TypeError, match="no numeric data to plot"): + df.plot() + + @pytest.mark.parametrize( + "kind", ("line", "bar", "barh", "hist", "kde", "density", "area", "pie") + ) + def test_group_subplot(self, kind): + pytest.importorskip("scipy") + d = { + "a": np.arange(10), + "b": np.arange(10) + 1, + "c": np.arange(10) + 1, + "d": np.arange(10), + "e": np.arange(10), + } + df = DataFrame(d) + + axes = df.plot(subplots=[("b", "e"), ("c", "d")], kind=kind) + assert len(axes) == 3 # 2 groups + single column a + + expected_labels = (["b", "e"], ["c", "d"], ["a"]) + for ax, labels in zip(axes, expected_labels): + if kind != "pie": + _check_legend_labels(ax, 
labels=labels) + if kind == "line": + assert len(ax.lines) == len(labels) + + def test_group_subplot_series_notimplemented(self): + ser = Series(range(1)) + msg = "An iterable subplots for a Series" + with pytest.raises(NotImplementedError, match=msg): + ser.plot(subplots=[("a",)]) + + def test_group_subplot_multiindex_notimplemented(self): + df = DataFrame(np.eye(2), columns=MultiIndex.from_tuples([(0, 1), (1, 2)])) + msg = "An iterable subplots for a DataFrame with a MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + df.plot(subplots=[(0, 1)]) + + def test_group_subplot_nonunique_cols_notimplemented(self): + df = DataFrame(np.eye(2), columns=["a", "a"]) + msg = "An iterable subplots for a DataFrame with non-unique" + with pytest.raises(NotImplementedError, match=msg): + df.plot(subplots=[("a",)]) + + @pytest.mark.parametrize( + "subplots, expected_msg", + [ + (123, "subplots should be a bool or an iterable"), + ("a", "each entry should be a list/tuple"), # iterable of non-iterable + ((1,), "each entry should be a list/tuple"), # iterable of non-iterable + (("a",), "each entry should be a list/tuple"), # iterable of strings + ], + ) + def test_group_subplot_bad_input(self, subplots, expected_msg): + # Make sure error is raised when subplots is not a properly + # formatted iterable. Only iterables of iterables are permitted, and + # entries should not be strings. 
+ d = {"a": np.arange(10), "b": np.arange(10)} + df = DataFrame(d) + + with pytest.raises(ValueError, match=expected_msg): + df.plot(subplots=subplots) + + def test_group_subplot_invalid_column_name(self): + d = {"a": np.arange(10), "b": np.arange(10)} + df = DataFrame(d) + + with pytest.raises(ValueError, match=r"Column label\(s\) \['bad_name'\]"): + df.plot(subplots=[("a", "bad_name")]) + + def test_group_subplot_duplicated_column(self): + d = {"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)} + df = DataFrame(d) + + with pytest.raises(ValueError, match="should be in only one subplot"): + df.plot(subplots=[("a", "b"), ("a", "c")]) + + @pytest.mark.parametrize("kind", ("box", "scatter", "hexbin")) + def test_group_subplot_invalid_kind(self, kind): + d = {"a": np.arange(10), "b": np.arange(10)} + df = DataFrame(d) + with pytest.raises( + ValueError, match="When subplots is an iterable, kind must be one of" + ): + df.plot(subplots=[("a", "b")], kind=kind) + + @pytest.mark.parametrize( + "index_name, old_label, new_label", + [ + (None, "", "new"), + ("old", "old", "new"), + (None, "", ""), + (None, "", 1), + (None, "", [1, 2]), + ], + ) + @pytest.mark.parametrize("kind", ["line", "area", "bar"]) + def test_xlabel_ylabel_dataframe_single_plot( + self, kind, index_name, old_label, new_label + ): + # GH 9093 + df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) + df.index.name = index_name + + # default is the ylabel is not shown and xlabel is index name + ax = df.plot(kind=kind) + assert ax.get_xlabel() == old_label + assert ax.get_ylabel() == "" + + # old xlabel will be overridden and assigned ylabel will be used as ylabel + ax = df.plot(kind=kind, ylabel=new_label, xlabel=new_label) + assert ax.get_ylabel() == str(new_label) + assert ax.get_xlabel() == str(new_label) + + @pytest.mark.parametrize( + "xlabel, ylabel", + [ + (None, None), + ("X Label", None), + (None, "Y Label"), + ("X Label", "Y Label"), + ], + ) + @pytest.mark.parametrize("kind", 
["scatter", "hexbin"]) + def test_xlabel_ylabel_dataframe_plane_plot(self, kind, xlabel, ylabel): + # GH 37001 + xcol = "Type A" + ycol = "Type B" + df = DataFrame([[1, 2], [2, 5]], columns=[xcol, ycol]) + + # default is the labels are column names + ax = df.plot(kind=kind, x=xcol, y=ycol, xlabel=xlabel, ylabel=ylabel) + assert ax.get_xlabel() == (xcol if xlabel is None else xlabel) + assert ax.get_ylabel() == (ycol if ylabel is None else ylabel) + + @pytest.mark.parametrize("secondary_y", (False, True)) + def test_secondary_y(self, secondary_y): + ax_df = DataFrame([0]).plot( + secondary_y=secondary_y, ylabel="Y", ylim=(0, 100), yticks=[99] + ) + for ax in ax_df.figure.axes: + if ax.yaxis.get_visible(): + assert ax.get_ylabel() == "Y" + assert ax.get_ylim() == (0, 100) + assert ax.get_yticks()[0] == 99 + + @pytest.mark.slow + def test_plot_no_warning(self): + # GH 55138 + # TODO(3.0): this can be removed once Period[B] deprecation is enforced + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + columns=Index(list("ABCD"), dtype=object), + index=date_range("2000-01-01", periods=10, freq="B"), + ) + with tm.assert_produces_warning(False): + _ = df.plot() + _ = df.T.plot() + + +def _generate_4_axes_via_gridspec(): + import matplotlib.pyplot as plt + + gs = mpl.gridspec.GridSpec(2, 2) + ax_tl = plt.subplot(gs[0, 0]) + ax_ll = plt.subplot(gs[1, 0]) + ax_tr = plt.subplot(gs[0, 1]) + ax_lr = plt.subplot(gs[1, 1]) + + return gs, [ax_tl, ax_ll, ax_tr, ax_lr] diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py new file mode 100644 index 0000000000000000000000000000000000000000..ff1edd323ef280cef5e7e79aa809906434a86407 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py @@ -0,0 +1,670 @@ +""" Test cases for DataFrame.plot """ +import re + +import numpy as np +import pytest + 
+import pandas as pd +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_colors, + _check_plot_works, + _unpack_cycler, +) +from pandas.util.version import Version + +mpl = pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") +cm = pytest.importorskip("matplotlib.cm") + + +def _check_colors_box(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None): + if fliers_c is None: + fliers_c = "k" + _check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"])) + _check_colors(bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])) + _check_colors(bp["medians"], linecolors=[medians_c] * len(bp["medians"])) + _check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"])) + _check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"])) + + +class TestDataFrameColor: + @pytest.mark.parametrize( + "color", ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"] + ) + def test_mpl2_color_cycle_str(self, color): + # GH 15516 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"] + ) + _check_plot_works(df.plot, color=color) + + def test_color_single_series_list(self): + # GH 3486 + df = DataFrame({"A": [1, 2, 3]}) + _check_plot_works(df.plot, color=["red"]) + + @pytest.mark.parametrize("color", [(1, 0, 0), (1, 0, 0, 0.5)]) + def test_rgb_tuple_color(self, color): + # GH 16695 + df = DataFrame({"x": [1, 2], "y": [3, 4]}) + _check_plot_works(df.plot, x="x", y="y", color=color) + + def test_color_empty_string(self): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + with pytest.raises(ValueError, match="Invalid color argument:"): + df.plot(color="") + + def test_color_and_style_arguments(self): + df = DataFrame({"x": [1, 2], "y": [3, 4]}) + # passing both 'color' and 'style' arguments should be allowed + # if there is no color symbol in the style strings: + ax = df.plot(color=["red", "black"], style=["-", 
"--"]) + # check that the linestyles are correctly set: + linestyle = [line.get_linestyle() for line in ax.lines] + assert linestyle == ["-", "--"] + # check that the colors are correctly set: + color = [line.get_color() for line in ax.lines] + assert color == ["red", "black"] + # passing both 'color' and 'style' arguments should not be allowed + # if there is a color symbol in the style strings: + msg = ( + "Cannot pass 'style' string with a color symbol and 'color' keyword " + "argument. Please use one or the other or pass 'style' without a color " + "symbol" + ) + with pytest.raises(ValueError, match=msg): + df.plot(color=["red", "black"], style=["k-", "r--"]) + + @pytest.mark.parametrize( + "color, expected", + [ + ("green", ["green"] * 4), + (["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]), + ], + ) + def test_color_and_marker(self, color, expected): + # GH 21003 + df = DataFrame(np.random.default_rng(2).random((7, 4))) + ax = df.plot(color=color, style="d--") + # check colors + result = [i.get_color() for i in ax.lines] + assert result == expected + # check markers and linestyles + assert all(i.get_linestyle() == "--" for i in ax.lines) + assert all(i.get_marker() == "d" for i in ax.lines) + + def test_bar_colors(self): + default_colors = _unpack_cycler(plt.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.bar() + _check_colors(ax.patches[::5], facecolors=default_colors[:5]) + + def test_bar_colors_custom(self): + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.bar(color=custom_colors) + _check_colors(ax.patches[::5], facecolors=custom_colors) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_bar_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + ax = df.plot.bar(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)] + 
_check_colors(ax.patches[::5], facecolors=rgba_colors) + + def test_bar_colors_single_col(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.loc[:, [0]].plot.bar(color="DodgerBlue") + _check_colors([ax.patches[0]], facecolors=["DodgerBlue"]) + + def test_bar_colors_green(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(kind="bar", color="green") + _check_colors(ax.patches[::5], facecolors=["green"] * 5) + + def test_bar_user_colors(self): + df = DataFrame( + {"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]} + ) + # This should *only* work when `y` is specified, else + # we use one color per column + ax = df.plot.bar(y="A", color=df["color"]) + result = [p.get_facecolor() for p in ax.patches] + expected = [ + (1.0, 0.0, 0.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (0.0, 0.0, 1.0, 1.0), + (1.0, 0.0, 0.0, 1.0), + ] + assert result == expected + + def test_if_scatterplot_colorbar_affects_xaxis_visibility(self): + # addressing issue #10611, to ensure colobar does not + # interfere with x-axis label and ticklabels with + # ipython inline backend. 
+ random_array = np.random.default_rng(2).random((10, 3)) + df = DataFrame(random_array, columns=["A label", "B label", "C label"]) + + ax1 = df.plot.scatter(x="A label", y="B label") + ax2 = df.plot.scatter(x="A label", y="B label", c="C label") + + vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()] + vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()] + assert vis1 == vis2 + + vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()] + vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()] + assert vis1 == vis2 + + assert ( + ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible() + ) + + def test_if_hexbin_xaxis_label_is_visible(self): + # addressing issue #10678, to ensure colobar does not + # interfere with x-axis label and ticklabels with + # ipython inline backend. + random_array = np.random.default_rng(2).random((10, 3)) + df = DataFrame(random_array, columns=["A label", "B label", "C label"]) + + ax = df.plot.hexbin("A label", "B label", gridsize=12) + assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels()) + assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels()) + assert ax.xaxis.get_label().get_visible() + + def test_if_scatterplot_colorbars_are_next_to_parent_axes(self): + random_array = np.random.default_rng(2).random((10, 3)) + df = DataFrame(random_array, columns=["A label", "B label", "C label"]) + + fig, axes = plt.subplots(1, 2) + df.plot.scatter("A label", "B label", c="C label", ax=axes[0]) + df.plot.scatter("A label", "B label", c="C label", ax=axes[1]) + plt.tight_layout() + + points = np.array([ax.get_position().get_points() for ax in fig.axes]) + axes_x_coords = points[:, :, 0] + parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :] + colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :] + assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all() + + @pytest.mark.parametrize("cmap", [None, "Greys"]) + 
def test_scatter_with_c_column_name_with_colors(self, cmap): + # https://github.com/pandas-dev/pandas/issues/34316 + + df = DataFrame( + [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]], + columns=["length", "width"], + ) + df["species"] = ["r", "r", "g", "g", "b"] + if cmap is not None: + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + ax = df.plot.scatter(x=0, y=1, cmap=cmap, c="species") + else: + ax = df.plot.scatter(x=0, y=1, c="species", cmap=cmap) + assert ax.collections[0].colorbar is None + + def test_scatter_colors(self): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]}) + with pytest.raises(TypeError, match="Specify exactly one of `c` and `color`"): + df.plot.scatter(x="a", y="b", c="c", color="green") + + def test_scatter_colors_not_raising_warnings(self): + # GH-53908. Do not raise UserWarning: No data for colormapping + # provided via 'c'. Parameters 'cmap' will be ignored + df = DataFrame({"x": [1, 2, 3], "y": [1, 2, 3]}) + with tm.assert_produces_warning(None): + df.plot.scatter(x="x", y="y", c="b") + + def test_scatter_colors_default(self): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]}) + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + ax = df.plot.scatter(x="a", y="b", c="c") + tm.assert_numpy_array_equal( + ax.collections[0].get_facecolor()[0], + np.array(mpl.colors.ColorConverter.to_rgba(default_colors[0])), + ) + + def test_scatter_colors_white(self): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]}) + ax = df.plot.scatter(x="a", y="b", color="white") + tm.assert_numpy_array_equal( + ax.collections[0].get_facecolor()[0], + np.array([1, 1, 1, 1], dtype=np.float64), + ) + + def test_scatter_colorbar_different_cmap(self): + # GH 33389 + df = DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]}) + df["x2"] = df["x"] + 1 + + _, ax = plt.subplots() + df.plot("x", "y", c="c", kind="scatter", cmap="cividis", ax=ax) + df.plot("x2", "y", c="c", 
kind="scatter", cmap="magma", ax=ax) + + assert ax.collections[0].cmap.name == "cividis" + assert ax.collections[1].cmap.name == "magma" + + def test_line_colors(self): + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + ax = df.plot(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + + plt.close("all") + + ax2 = df.plot(color=custom_colors) + lines2 = ax2.get_lines() + + for l1, l2 in zip(ax.get_lines(), lines2): + assert l1.get_color() == l2.get_color() + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_line_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + _check_colors(ax.get_lines(), linecolors=rgba_colors) + + def test_line_colors_single_col(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # make color a list if plotting one column frame + # handles cases like df.plot(color='DodgerBlue') + ax = df.loc[:, [0]].plot(color="DodgerBlue") + _check_colors(ax.lines, linecolors=["DodgerBlue"]) + + def test_line_colors_single_color(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(color="red") + _check_colors(ax.get_lines(), linecolors=["red"] * 5) + + def test_line_colors_hex(self): + # GH 10299 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"] + ax = df.plot(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + + def test_dont_modify_colors(self): + colors = ["r", "g", "b"] + DataFrame(np.random.default_rng(2).random((10, 2))).plot(color=colors) + assert len(colors) == 3 + + def test_line_colors_and_styles_subplots(self): + # GH 9894 + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 
5))) + + axes = df.plot(subplots=True) + for ax, c in zip(axes, list(default_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("color", ["k", "green"]) + def test_line_colors_and_styles_subplots_single_color_str(self, color): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + axes = df.plot(subplots=True, color=color) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=[color]) + + @pytest.mark.parametrize("color", ["rgcby", list("rgcby")]) + def test_line_colors_and_styles_subplots_custom_colors(self, color): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + axes = df.plot(color=color, subplots=True) + for ax, c in zip(axes, list(color)): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_line_colors_and_styles_subplots_colormap_hex(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # GH 10299 + custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"] + axes = df.plot(color=custom_colors, subplots=True) + for ax, c in zip(axes, list(custom_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("cmap", ["jet", cm.jet]) + def test_line_colors_and_styles_subplots_colormap_subplot(self, cmap): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + axes = df.plot(colormap=cmap, subplots=True) + for ax, c in zip(axes, rgba_colors): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_line_colors_and_styles_subplots_single_col(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # make color a list if plotting one column frame + # handles cases like df.plot(color='DodgerBlue') + axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True) + _check_colors(axes[0].lines, linecolors=["DodgerBlue"]) + + def 
test_line_colors_and_styles_subplots_single_char(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # single character style + axes = df.plot(style="r", subplots=True) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=["r"]) + + def test_line_colors_and_styles_subplots_list_styles(self): + # GH 9894 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # list of styles + styles = list("rgcby") + axes = df.plot(style=styles, subplots=True) + for ax, c in zip(axes, styles): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_area_colors(self): + from matplotlib.collections import PolyCollection + + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).random((5, 5))) + + ax = df.plot.area(color=custom_colors) + _check_colors(ax.get_lines(), linecolors=custom_colors) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + _check_colors(poly, facecolors=custom_colors) + + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, facecolors=custom_colors) + + for h in handles: + assert h.get_alpha() is None + + def test_area_colors_poly(self): + from matplotlib import cm + from matplotlib.collections import PolyCollection + + df = DataFrame(np.random.default_rng(2).random((5, 5))) + ax = df.plot.area(colormap="jet") + jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + _check_colors(ax.get_lines(), linecolors=jet_colors) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + _check_colors(poly, facecolors=jet_colors) + + handles, _ = ax.get_legend_handles_labels() + _check_colors(handles, facecolors=jet_colors) + for h in handles: + assert h.get_alpha() is None + + def test_area_colors_stacked_false(self): + from matplotlib import cm + from matplotlib.collections import PolyCollection + + df = DataFrame(np.random.default_rng(2).random((5, 5))) + jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + # When 
stacked=False, alpha is set to 0.5 + ax = df.plot.area(colormap=cm.jet, stacked=False) + _check_colors(ax.get_lines(), linecolors=jet_colors) + poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)] + jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors] + _check_colors(poly, facecolors=jet_with_alpha) + + handles, _ = ax.get_legend_handles_labels() + linecolors = jet_with_alpha + _check_colors(handles[: len(jet_colors)], linecolors=linecolors) + for h in handles: + assert h.get_alpha() == 0.5 + + def test_hist_colors(self): + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.hist() + _check_colors(ax.patches[::10], facecolors=default_colors[:5]) + + def test_hist_colors_single_custom(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + custom_colors = "rgcby" + ax = df.plot.hist(color=custom_colors) + _check_colors(ax.patches[::10], facecolors=custom_colors) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_hist_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.hist(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)] + _check_colors(ax.patches[::10], facecolors=rgba_colors) + + def test_hist_colors_single_col(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.loc[:, [0]].plot.hist(color="DodgerBlue") + _check_colors([ax.patches[0]], facecolors=["DodgerBlue"]) + + def test_hist_colors_single_color(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(kind="hist", color="green") + _check_colors(ax.patches[::10], facecolors=["green"] * 5) + + def test_kde_colors(self): + pytest.importorskip("scipy") + custom_colors = "rgcby" + df = DataFrame(np.random.default_rng(2).random((5, 5))) + + ax = df.plot.kde(color=custom_colors) + _check_colors(ax.get_lines(), 
linecolors=custom_colors) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_kde_colors_cmap(self, colormap): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.kde(colormap=colormap) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + _check_colors(ax.get_lines(), linecolors=rgba_colors) + + def test_kde_colors_and_styles_subplots(self): + pytest.importorskip("scipy") + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + + axes = df.plot(kind="kde", subplots=True) + for ax, c in zip(axes, list(default_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("colormap", ["k", "red"]) + def test_kde_colors_and_styles_subplots_single_col_str(self, colormap): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + axes = df.plot(kind="kde", color=colormap, subplots=True) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=[colormap]) + + def test_kde_colors_and_styles_subplots_custom_color(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + custom_colors = "rgcby" + axes = df.plot(kind="kde", color=custom_colors, subplots=True) + for ax, c in zip(axes, list(custom_colors)): + _check_colors(ax.get_lines(), linecolors=[c]) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_kde_colors_and_styles_subplots_cmap(self, colormap): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))] + axes = df.plot(kind="kde", colormap=colormap, subplots=True) + for ax, c in zip(axes, rgba_colors): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_kde_colors_and_styles_subplots_single_col(self): + pytest.importorskip("scipy") + df = 
DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # make color a list if plotting one column frame + # handles cases like df.plot(color='DodgerBlue') + axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True) + _check_colors(axes[0].lines, linecolors=["DodgerBlue"]) + + def test_kde_colors_and_styles_subplots_single_char(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # list of styles + # single character style + axes = df.plot(kind="kde", style="r", subplots=True) + for ax in axes: + _check_colors(ax.get_lines(), linecolors=["r"]) + + def test_kde_colors_and_styles_subplots_list(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # list of styles + styles = list("rgcby") + axes = df.plot(kind="kde", style=styles, subplots=True) + for ax, c in zip(axes, styles): + _check_colors(ax.get_lines(), linecolors=[c]) + + def test_boxplot_colors(self): + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + bp = df.plot.box(return_type="dict") + _check_colors_box( + bp, + default_colors[0], + default_colors[0], + default_colors[2], + default_colors[0], + ) + + def test_boxplot_colors_dict_colors(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + dict_colors = { + "boxes": "#572923", + "whiskers": "#982042", + "medians": "#804823", + "caps": "#123456", + } + bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict") + _check_colors_box( + bp, + dict_colors["boxes"], + dict_colors["whiskers"], + dict_colors["medians"], + dict_colors["caps"], + "r", + ) + + def test_boxplot_colors_default_color(self): + default_colors = _unpack_cycler(mpl.pyplot.rcParams) + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # partial colors + dict_colors = {"whiskers": "c", "medians": "m"} + bp = df.plot.box(color=dict_colors, 
return_type="dict") + _check_colors_box(bp, default_colors[0], "c", "m", default_colors[0]) + + @pytest.mark.parametrize("colormap", ["jet", cm.jet]) + def test_boxplot_colors_cmap(self, colormap): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + bp = df.plot.box(colormap=colormap, return_type="dict") + jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)] + _check_colors_box( + bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0] + ) + + def test_boxplot_colors_single(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # string color is applied to all artists except fliers + bp = df.plot.box(color="DodgerBlue", return_type="dict") + _check_colors_box(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue") + + def test_boxplot_colors_tuple(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + # tuple is also applied to all artists except fliers + bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict") + _check_colors_box(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456") + + def test_boxplot_colors_invalid(self): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + msg = re.escape( + "color dict contains invalid key 'xxxx'. 
The key must be either " + "['boxes', 'whiskers', 'medians', 'caps']" + ) + with pytest.raises(ValueError, match=msg): + # Color contains invalid key results in ValueError + df.plot.box(color={"boxes": "red", "xxxx": "blue"}) + + def test_default_color_cycle(self): + import cycler + + colors = list("rgbk") + plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors) + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) + ax = df.plot() + + expected = _unpack_cycler(plt.rcParams)[:3] + _check_colors(ax.get_lines(), linecolors=expected) + + def test_no_color_bar(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + ax = df.plot.hexbin(x="A", y="B", colorbar=None) + assert ax.collections[0].colorbar is None + + def test_mixing_cmap_and_colormap_raises(self): + df = DataFrame( + { + "A": np.random.default_rng(2).uniform(size=20), + "B": np.random.default_rng(2).uniform(size=20), + "C": np.arange(20) + np.random.default_rng(2).uniform(size=20), + } + ) + msg = "Only specify one of `cmap` and `colormap`" + with pytest.raises(TypeError, match=msg): + df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn") + + def test_passed_bar_colors(self): + color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)] + colormap = mpl.colors.ListedColormap(color_tuples) + barplot = DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap) + assert color_tuples == [c.get_facecolor() for c in barplot.patches] + + def test_rcParams_bar_colors(self): + color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)] + with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", color_tuples)}): + barplot = DataFrame([[1, 2, 3]]).plot(kind="bar") + assert color_tuples == [c.get_facecolor() for c in barplot.patches] + + def test_colors_of_columns_with_same_name(self): + # ISSUE 11136 -> 
https://github.com/pandas-dev/pandas/issues/11136 + # Creating a DataFrame with duplicate column labels and testing colors of them. + df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]}) + df1 = DataFrame({"a": [2, 4, 6]}) + df_concat = pd.concat([df, df1], axis=1) + result = df_concat.plot() + legend = result.get_legend() + if Version(mpl.__version__) < Version("3.7"): + handles = legend.legendHandles + else: + handles = legend.legend_handles + for legend, line in zip(handles, result.lines): + assert legend.get_color() == line.get_color() + + def test_invalid_colormap(self): + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 2)), columns=["A", "B"] + ) + msg = "(is not a valid value)|(is not a known colormap)" + with pytest.raises((ValueError, KeyError), match=msg): + df.plot(colormap="invalid_colormap") + + def test_dataframe_none_color(self): + # GH51953 + df = DataFrame([[1, 2, 3]]) + ax = df.plot(color=None) + expected = _unpack_cycler(mpl.pyplot.rcParams)[:3] + _check_colors(ax.get_lines(), linecolors=expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..f1924185a3df1cae2f0df89ec84225cd68f8fa6d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py @@ -0,0 +1,72 @@ +""" Test cases for DataFrame.plot """ + +import pytest + +from pandas import DataFrame +from pandas.tests.plotting.common import _check_visible + +pytest.importorskip("matplotlib") + + +class TestDataFramePlotsGroupby: + def _assert_ytickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + _check_visible(ax.get_yticklabels(), visible=exp) + + def _assert_xtickslabels_visibility(self, axes, expected): + for ax, exp in zip(axes, expected): + _check_visible(ax.get_xticklabels(), visible=exp) + + 
@pytest.mark.parametrize( + "kwargs, expected", + [ + # behavior without keyword + ({}, [True, False, True, False]), + # set sharey=True should be identical + ({"sharey": True}, [True, False, True, False]), + # sharey=False, all yticklabels should be visible + ({"sharey": False}, [True, True, True, True]), + ], + ) + def test_groupby_boxplot_sharey(self, kwargs, expected): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharey can now be switched check whether the right + # pair of axes is turned on or off + df = DataFrame( + { + "a": [-1.43, -0.15, -3.70, -1.43, -0.14], + "b": [0.56, 0.84, 0.29, 0.56, 0.85], + "c": [0, 1, 2, 3, 1], + }, + index=[0, 1, 2, 3, 4], + ) + axes = df.groupby("c").boxplot(**kwargs) + self._assert_ytickslabels_visibility(axes, expected) + + @pytest.mark.parametrize( + "kwargs, expected", + [ + # behavior without keyword + ({}, [True, True, True, True]), + # set sharex=False should be identical + ({"sharex": False}, [True, True, True, True]), + # sharex=True, xticklabels should be visible + # only for bottom plots + ({"sharex": True}, [False, False, True, True]), + ], + ) + def test_groupby_boxplot_sharex(self, kwargs, expected): + # https://github.com/pandas-dev/pandas/issues/20968 + # sharex can now be switched check whether the right + # pair of axes is turned on or off + + df = DataFrame( + { + "a": [-1.43, -0.15, -3.70, -1.43, -0.14], + "b": [0.56, 0.84, 0.29, 0.56, 0.85], + "c": [0, 1, 2, 3, 1], + }, + index=[0, 1, 2, 3, 4], + ) + axes = df.groupby("c").boxplot(**kwargs) + self._assert_xtickslabels_visibility(axes, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py new file mode 100644 index 0000000000000000000000000000000000000000..402a4b9531e5d4857d0d6e9d7cda2c002d0469d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py @@ -0,0 
+1,272 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + date_range, +) +from pandas.tests.plotting.common import ( + _check_legend_labels, + _check_legend_marker, + _check_text_labels, +) +from pandas.util.version import Version + +mpl = pytest.importorskip("matplotlib") + + +class TestFrameLegend: + @pytest.mark.xfail( + reason=( + "Open bug in matplotlib " + "https://github.com/matplotlib/matplotlib/issues/11357" + ) + ) + def test_mixed_yerr(self): + # https://github.com/pandas-dev/pandas/issues/39522 + from matplotlib.collections import LineCollection + from matplotlib.lines import Line2D + + df = DataFrame([{"x": 1, "a": 1, "b": 1}, {"x": 2, "a": 2, "b": 3}]) + + ax = df.plot("x", "a", c="orange", yerr=0.1, label="orange") + df.plot("x", "b", c="blue", yerr=None, ax=ax, label="blue") + + legend = ax.get_legend() + if Version(mpl.__version__) < Version("3.7"): + result_handles = legend.legendHandles + else: + result_handles = legend.legend_handles + + assert isinstance(result_handles[0], LineCollection) + assert isinstance(result_handles[1], Line2D) + + def test_legend_false(self): + # https://github.com/pandas-dev/pandas/issues/40044 + df = DataFrame({"a": [1, 1], "b": [2, 3]}) + df2 = DataFrame({"d": [2.5, 2.5]}) + + ax = df.plot(legend=True, color={"a": "blue", "b": "green"}, secondary_y="b") + df2.plot(legend=True, color={"d": "red"}, ax=ax) + legend = ax.get_legend() + if Version(mpl.__version__) < Version("3.7"): + handles = legend.legendHandles + else: + handles = legend.legend_handles + result = [handle.get_color() for handle in handles] + expected = ["blue", "green", "red"] + assert result == expected + + @pytest.mark.parametrize("kind", ["line", "bar", "barh", "kde", "area", "hist"]) + def test_df_legend_labels(self, kind): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"]) + df2 = DataFrame( + 
np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"] + ) + df3 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["g", "h", "i"] + ) + df4 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["j", "k", "l"] + ) + + ax = df.plot(kind=kind, legend=True) + _check_legend_labels(ax, labels=df.columns) + + ax = df2.plot(kind=kind, legend=False, ax=ax) + _check_legend_labels(ax, labels=df.columns) + + ax = df3.plot(kind=kind, legend=True, ax=ax) + _check_legend_labels(ax, labels=df.columns.union(df3.columns)) + + ax = df4.plot(kind=kind, legend="reverse", ax=ax) + expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns)) + _check_legend_labels(ax, labels=expected) + + def test_df_legend_labels_secondary_y(self): + pytest.importorskip("scipy") + df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"]) + df2 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"] + ) + df3 = DataFrame( + np.random.default_rng(2).random((3, 3)), columns=["g", "h", "i"] + ) + # Secondary Y + ax = df.plot(legend=True, secondary_y="b") + _check_legend_labels(ax, labels=["a", "b (right)", "c"]) + ax = df2.plot(legend=False, ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c"]) + ax = df3.plot(kind="bar", legend=True, secondary_y="h", ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"]) + + def test_df_legend_labels_time_series(self): + # Time Series + pytest.importorskip("scipy") + ind = date_range("1/1/2014", periods=3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["a", "b", "c"], + index=ind, + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["d", "e", "f"], + index=ind, + ) + df3 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["g", "h", "i"], + index=ind, + ) + ax = df.plot(legend=True, secondary_y="b") + _check_legend_labels(ax, labels=["a", "b 
(right)", "c"]) + ax = df2.plot(legend=False, ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c"]) + ax = df3.plot(legend=True, ax=ax) + _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"]) + + def test_df_legend_labels_time_series_scatter(self): + # Time Series + pytest.importorskip("scipy") + ind = date_range("1/1/2014", periods=3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["a", "b", "c"], + index=ind, + ) + df2 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["d", "e", "f"], + index=ind, + ) + df3 = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["g", "h", "i"], + index=ind, + ) + # scatter + ax = df.plot.scatter(x="a", y="b", label="data1") + _check_legend_labels(ax, labels=["data1"]) + ax = df2.plot.scatter(x="d", y="e", legend=False, label="data2", ax=ax) + _check_legend_labels(ax, labels=["data1"]) + ax = df3.plot.scatter(x="g", y="h", label="data3", ax=ax) + _check_legend_labels(ax, labels=["data1", "data3"]) + + def test_df_legend_labels_time_series_no_mutate(self): + pytest.importorskip("scipy") + ind = date_range("1/1/2014", periods=3) + df = DataFrame( + np.random.default_rng(2).standard_normal((3, 3)), + columns=["a", "b", "c"], + index=ind, + ) + # ensure label args pass through and + # index name does not mutate + # column names don't mutate + df5 = df.set_index("a") + ax = df5.plot(y="b") + _check_legend_labels(ax, labels=["b"]) + ax = df5.plot(y="b", label="LABEL_b") + _check_legend_labels(ax, labels=["LABEL_b"]) + _check_text_labels(ax.xaxis.get_label(), "a") + ax = df5.plot(y="c", label="LABEL_c", ax=ax) + _check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"]) + assert df5.columns.tolist() == ["b", "c"] + + def test_missing_marker_multi_plots_on_same_ax(self): + # GH 18222 + df = DataFrame(data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"]) + _, ax = mpl.pyplot.subplots(nrows=1, ncols=3) + # Left plot + 
df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0]) + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0]) + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0]) + _check_legend_labels(ax[0], labels=["r", "g", "b"]) + _check_legend_marker(ax[0], expected_markers=["o", "x", "o"]) + # Center plot + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1]) + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1]) + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1]) + _check_legend_labels(ax[1], labels=["b", "r", "g"]) + _check_legend_marker(ax[1], expected_markers=["o", "o", "x"]) + # Right plot + df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[2]) + df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2]) + df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2]) + _check_legend_labels(ax[2], labels=["g", "b", "r"]) + _check_legend_marker(ax[2], expected_markers=["x", "o", "o"]) + + def test_legend_name(self): + multi = DataFrame( + np.random.default_rng(2).standard_normal((4, 4)), + columns=[np.array(["a", "a", "b", "b"]), np.array(["x", "y", "x", "y"])], + ) + multi.columns.names = ["group", "individual"] + + ax = multi.plot() + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "group,individual") + + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot(legend=True, ax=ax) + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "group,individual") + + df.columns.name = "new" + ax = df.plot(legend=False, ax=ax) + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "group,individual") + + ax = df.plot(legend=True, ax=ax) + leg_title = ax.legend_.get_title() + _check_text_labels(leg_title, "new") + + @pytest.mark.parametrize( + "kind", + [ + "line", + "bar", + "barh", + pytest.param("kde", marks=td.skip_if_no("scipy")), + "area", + "hist", + ], + ) + def test_no_legend(self, 
kind): + df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"]) + ax = df.plot(kind=kind, legend=False) + _check_legend_labels(ax, visible=False) + + def test_missing_markers_legend(self): + # 14958 + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 3)), columns=["A", "B", "C"] + ) + ax = df.plot(y=["A"], marker="x", linestyle="solid") + df.plot(y=["B"], marker="o", linestyle="dotted", ax=ax) + df.plot(y=["C"], marker="<", linestyle="dotted", ax=ax) + + _check_legend_labels(ax, labels=["A", "B", "C"]) + _check_legend_marker(ax, expected_markers=["x", "o", "<"]) + + def test_missing_markers_legend_using_style(self): + # 14563 + df = DataFrame( + { + "A": [1, 2, 3, 4, 5, 6], + "B": [2, 4, 1, 3, 2, 4], + "C": [3, 3, 2, 6, 4, 2], + "X": [1, 2, 3, 4, 5, 6], + } + ) + + _, ax = mpl.pyplot.subplots() + for kind in "ABC": + df.plot("X", kind, label=kind, ax=ax, style=".") + + _check_legend_labels(ax, labels=["A", "B", "C"]) + _check_legend_marker(ax, expected_markers=[".", ".", "."]) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py new file mode 100644 index 0000000000000000000000000000000000000000..4d8d8fa4cdee38d568d099019e89114fb0cdb4e9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py @@ -0,0 +1,752 @@ +""" Test cases for DataFrame.plot """ + +import string + +import numpy as np +import pytest + +from pandas.compat import is_platform_linux +from pandas.compat.numpy import np_version_gte1p24 + +import pandas as pd +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_box_return_type, + _check_legend_labels, + _check_ticks_props, + _check_visible, + _flatten_visible, +) + +from pandas.io.formats.printing import pprint_thing + +mpl = 
pytest.importorskip("matplotlib") +plt = pytest.importorskip("matplotlib.pyplot") + + +class TestDataFramePlotsSubplots: + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"]) + def test_subplots(self, kind): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True) + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + assert axes.shape == (3,) + + for ax, column in zip(axes, df.columns): + _check_legend_labels(ax, labels=[pprint_thing(column)]) + + for ax in axes[:-2]: + _check_visible(ax.xaxis) # xaxis must be visible for grid + _check_visible(ax.get_xticklabels(), visible=False) + if kind != "bar": + # change https://github.com/pandas-dev/pandas/issues/26714 + _check_visible(ax.get_xticklabels(minor=True), visible=False) + _check_visible(ax.xaxis.get_label(), visible=False) + _check_visible(ax.get_yticklabels()) + + _check_visible(axes[-1].xaxis) + _check_visible(axes[-1].get_xticklabels()) + _check_visible(axes[-1].get_xticklabels(minor=True)) + _check_visible(axes[-1].xaxis.get_label()) + _check_visible(axes[-1].get_yticklabels()) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"]) + def test_subplots_no_share_x(self, kind): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + axes = df.plot(kind=kind, subplots=True, sharex=False) + for ax in axes: + _check_visible(ax.xaxis) + _check_visible(ax.get_xticklabels()) + _check_visible(ax.get_xticklabels(minor=True)) + _check_visible(ax.xaxis.get_label()) + _check_visible(ax.get_yticklabels()) + + @pytest.mark.slow + @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"]) + def test_subplots_no_legend(self, kind): + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + axes = df.plot(kind=kind, subplots=True, 
legend=False) + for ax in axes: + assert ax.get_legend() is None + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_subplots_timeseries(self, kind): + idx = date_range(start="2014-07-01", freq="ME", periods=10) + df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx) + + axes = df.plot(kind=kind, subplots=True, sharex=True) + _check_axes_shape(axes, axes_num=3, layout=(3, 1)) + + for ax in axes[:-2]: + # GH 7801 + _check_visible(ax.xaxis) # xaxis must be visible for grid + _check_visible(ax.get_xticklabels(), visible=False) + _check_visible(ax.get_xticklabels(minor=True), visible=False) + _check_visible(ax.xaxis.get_label(), visible=False) + _check_visible(ax.get_yticklabels()) + + _check_visible(axes[-1].xaxis) + _check_visible(axes[-1].get_xticklabels()) + _check_visible(axes[-1].get_xticklabels(minor=True)) + _check_visible(axes[-1].xaxis.get_label()) + _check_visible(axes[-1].get_yticklabels()) + _check_ticks_props(axes, xrot=0) + + @pytest.mark.parametrize("kind", ["line", "area"]) + def test_subplots_timeseries_rot(self, kind): + idx = date_range(start="2014-07-01", freq="ME", periods=10) + df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx) + axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7) + for ax in axes: + _check_visible(ax.xaxis) + _check_visible(ax.get_xticklabels()) + _check_visible(ax.get_xticklabels(minor=True)) + _check_visible(ax.xaxis.get_label()) + _check_visible(ax.get_yticklabels()) + _check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7) + + @pytest.mark.parametrize( + "col", ["numeric", "timedelta", "datetime_no_tz", "datetime_all_tz"] + ) + def test_subplots_timeseries_y_axis(self, col): + # GH16953 + data = { + "numeric": np.array([1, 2, 5]), + "timedelta": [ + pd.Timedelta(-10, unit="s"), + pd.Timedelta(10, unit="m"), + pd.Timedelta(10, unit="h"), + ], + "datetime_no_tz": [ + pd.to_datetime("2017-08-01 00:00:00"), + pd.to_datetime("2017-08-01 02:00:00"), + 
pd.to_datetime("2017-08-02 00:00:00"), + ], + "datetime_all_tz": [ + pd.to_datetime("2017-08-01 00:00:00", utc=True), + pd.to_datetime("2017-08-01 02:00:00", utc=True), + pd.to_datetime("2017-08-02 00:00:00", utc=True), + ], + "text": ["This", "should", "fail"], + } + testdata = DataFrame(data) + + ax = testdata.plot(y=col) + result = ax.get_lines()[0].get_data()[1] + expected = testdata[col].values + assert (result == expected).all() + + def test_subplots_timeseries_y_text_error(self): + # GH16953 + data = { + "numeric": np.array([1, 2, 5]), + "text": ["This", "should", "fail"], + } + testdata = DataFrame(data) + msg = "no numeric data to plot" + with pytest.raises(TypeError, match=msg): + testdata.plot(y="text") + + @pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz") + def test_subplots_timeseries_y_axis_not_supported(self): + """ + This test will fail for: + period: + since period isn't yet implemented in ``select_dtypes`` + and because it will need a custom value converter + + tick formatter (as was done for x-axis plots) + + categorical: + because it will need a custom value converter + + tick formatter (also doesn't work for x-axis, as of now) + + datetime_mixed_tz: + because of the way how pandas handles ``Series`` of + ``datetime`` objects with different timezone, + generally converting ``datetime`` objects in a tz-aware + form could help with this problem + """ + data = { + "numeric": np.array([1, 2, 5]), + "period": [ + pd.Period("2017-08-01 00:00:00", freq="H"), + pd.Period("2017-08-01 02:00", freq="H"), + pd.Period("2017-08-02 00:00:00", freq="H"), + ], + "categorical": pd.Categorical( + ["c", "b", "a"], categories=["a", "b", "c"], ordered=False + ), + "datetime_mixed_tz": [ + pd.to_datetime("2017-08-01 00:00:00", utc=True), + pd.to_datetime("2017-08-01 02:00:00"), + pd.to_datetime("2017-08-02 00:00:00"), + ], + } + testdata = DataFrame(data) + ax_period = testdata.plot(x="numeric", y="period") + assert ( + 
ax_period.get_lines()[0].get_data()[1] == testdata["period"].values + ).all() + ax_categorical = testdata.plot(x="numeric", y="categorical") + assert ( + ax_categorical.get_lines()[0].get_data()[1] + == testdata["categorical"].values + ).all() + ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz") + assert ( + ax_datetime_mixed_tz.get_lines()[0].get_data()[1] + == testdata["datetime_mixed_tz"].values + ).all() + + @pytest.mark.parametrize( + "layout, exp_layout", + [ + [(2, 2), (2, 2)], + [(-1, 2), (2, 2)], + [(2, -1), (2, 2)], + [(1, 4), (1, 4)], + [(-1, 4), (1, 4)], + [(4, -1), (4, 1)], + ], + ) + def test_subplots_layout_multi_column(self, layout, exp_layout): + # GH 6667 + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + axes = df.plot(subplots=True, layout=layout) + _check_axes_shape(axes, axes_num=3, layout=exp_layout) + assert axes.shape == exp_layout + + def test_subplots_layout_multi_column_error(self): + # GH 6667 + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + msg = "Layout of 1x1 must be larger than required size 3" + + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, layout=(1, 1)) + + msg = "At least one dimension of layout must be positive" + with pytest.raises(ValueError, match=msg): + df.plot(subplots=True, layout=(-1, -1)) + + @pytest.mark.parametrize( + "kwargs, expected_axes_num, expected_layout, expected_shape", + [ + ({}, 1, (1, 1), (1,)), + ({"layout": (3, 3)}, 1, (3, 3), (3, 3)), + ], + ) + def test_subplots_layout_single_column( + self, kwargs, expected_axes_num, expected_layout, expected_shape + ): + # GH 6667 + df = DataFrame( + np.random.default_rng(2).random((10, 1)), + index=list(string.ascii_letters[:10]), + ) + axes = df.plot(subplots=True, **kwargs) + _check_axes_shape( + axes, + axes_num=expected_axes_num, + layout=expected_layout, + ) + assert axes.shape == expected_shape + 
+ @pytest.mark.slow + @pytest.mark.parametrize("idx", [range(5), date_range("1/1/2000", periods=5)]) + def test_subplots_warnings(self, idx): + # GH 9464 + with tm.assert_produces_warning(None): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 4)), index=idx) + df.plot(subplots=True, layout=(3, 2)) + + def test_subplots_multiple_axes(self): + # GH 5353, 6970, GH 7069 + fig, axes = mpl.pyplot.subplots(2, 3) + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + + returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + assert returned.shape == (3,) + assert returned[0].figure is fig + # draw on second row + returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False) + _check_axes_shape(returned, axes_num=3, layout=(1, 3)) + assert returned.shape == (3,) + assert returned[0].figure is fig + _check_axes_shape(axes, axes_num=6, layout=(2, 3)) + + def test_subplots_multiple_axes_error(self): + # GH 5353, 6970, GH 7069 + df = DataFrame( + np.random.default_rng(2).random((10, 3)), + index=list(string.ascii_letters[:10]), + ) + msg = "The number of passed axes must be 3, the same as the output plot" + _, axes = mpl.pyplot.subplots(2, 3) + + with pytest.raises(ValueError, match=msg): + # pass different number of axes from required + df.plot(subplots=True, ax=axes) + + @pytest.mark.parametrize( + "layout, exp_layout", + [ + [(2, 1), (2, 2)], + [(2, -1), (2, 2)], + [(-1, 2), (2, 2)], + ], + ) + def test_subplots_multiple_axes_2_dim(self, layout, exp_layout): + # GH 5353, 6970, GH 7069 + # pass 2-dim axes and invalid layout + # invalid lauout should not affect to input and return value + # (show warning is tested in + # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes + _, axes = mpl.pyplot.subplots(2, 2) + df = DataFrame( + np.random.default_rng(2).random((10, 4)), + index=list(string.ascii_letters[:10]), + ) + 
with tm.assert_produces_warning(UserWarning): + returned = df.plot( + subplots=True, ax=axes, layout=layout, sharex=False, sharey=False + ) + _check_axes_shape(returned, axes_num=4, layout=exp_layout) + assert returned.shape == (4,) + + def test_subplots_multiple_axes_single_col(self): + # GH 5353, 6970, GH 7069 + # single column + _, axes = mpl.pyplot.subplots(1, 1) + df = DataFrame( + np.random.default_rng(2).random((10, 1)), + index=list(string.ascii_letters[:10]), + ) + + axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False) + _check_axes_shape(axes, axes_num=1, layout=(1, 1)) + assert axes.shape == (1,) + + def test_subplots_ts_share_axes(self): + # GH 3964 + _, axes = mpl.pyplot.subplots(3, 3, sharex=True, sharey=True) + mpl.pyplot.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3) + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 9)), + index=date_range(start="2014-07-01", freq="ME", periods=10), + ) + for i, ax in enumerate(axes.ravel()): + df[i].plot(ax=ax, fontsize=5) + + # Rows other than bottom should not be visible + for ax in axes[0:-1].ravel(): + _check_visible(ax.get_xticklabels(), visible=False) + + # Bottom row should be visible + for ax in axes[-1].ravel(): + _check_visible(ax.get_xticklabels(), visible=True) + + # First column should be visible + for ax in axes[[0, 1, 2], [0]].ravel(): + _check_visible(ax.get_yticklabels(), visible=True) + + # Other columns should not be visible + for ax in axes[[0, 1, 2], [1]].ravel(): + _check_visible(ax.get_yticklabels(), visible=False) + for ax in axes[[0, 1, 2], [2]].ravel(): + _check_visible(ax.get_yticklabels(), visible=False) + + def test_subplots_sharex_axes_existing_axes(self): + # GH 9158 + d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]} + df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14")) + + axes = df[["A", "B"]].plot(subplots=True) + df["C"].plot(ax=axes[0], secondary_y=True) + + 
_check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[1].get_xticklabels(), visible=True) + for ax in axes.ravel(): + _check_visible(ax.get_yticklabels(), visible=True) + + def test_subplots_dup_columns(self): + # GH 10962 + df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa")) + axes = df.plot(subplots=True) + for ax in axes: + _check_legend_labels(ax, labels=["a"]) + assert len(ax.lines) == 1 + + def test_subplots_dup_columns_secondary_y(self): + # GH 10962 + df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa")) + axes = df.plot(subplots=True, secondary_y="a") + for ax in axes: + # (right) is only attached when subplots=False + _check_legend_labels(ax, labels=["a"]) + assert len(ax.lines) == 1 + + def test_subplots_dup_columns_secondary_y_no_subplot(self): + # GH 10962 + df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa")) + ax = df.plot(secondary_y="a") + _check_legend_labels(ax, labels=["a (right)"] * 5) + assert len(ax.lines) == 0 + assert len(ax.right_ax.lines) == 5 + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + def test_bar_log_no_subplots(self): + # GH3254, GH3298 matplotlib/matplotlib#1882, #1892 + # regressions in 1.2.1 + expected = np.array([0.1, 1.0, 10.0, 100]) + + # no subplots + df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5)) + ax = df.plot.bar(grid=True, log=True) + tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected) + + @pytest.mark.xfail( + np_version_gte1p24 and is_platform_linux(), + reason="Weird rounding problems", + strict=False, + ) + def test_bar_log_subplots(self): + expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4]) + + ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar( + log=True, subplots=True + ) + + tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected) + 
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected) + + def test_boxplot_subplots_return_type_default(self, hist_df): + df = hist_df + + # normal style: return_type=None + result = df.plot.box(subplots=True) + assert isinstance(result, Series) + _check_box_return_type( + result, None, expected_keys=["height", "weight", "category"] + ) + + @pytest.mark.parametrize("rt", ["dict", "axes", "both"]) + def test_boxplot_subplots_return_type(self, hist_df, rt): + df = hist_df + returned = df.plot.box(return_type=rt, subplots=True) + _check_box_return_type( + returned, + rt, + expected_keys=["height", "weight", "category"], + check_ax_title=False, + ) + + def test_df_subplots_patterns_minorticks(self): + # GH 10657 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=date_range("1/1/2000", periods=10), + columns=list("AB"), + ) + + # shared subplots + _, axes = plt.subplots(2, 1, sharex=True) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + # xaxis of 1st ax must be hidden + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[0].get_xticklabels(minor=True), visible=False) + _check_visible(axes[1].get_xticklabels(), visible=True) + _check_visible(axes[1].get_xticklabels(minor=True), visible=True) + + def test_df_subplots_patterns_minorticks_1st_ax_hidden(self): + # GH 10657 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=date_range("1/1/2000", periods=10), + columns=list("AB"), + ) + _, axes = plt.subplots(2, 1) + with tm.assert_produces_warning(UserWarning): + axes = df.plot(subplots=True, ax=axes, sharex=True) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + # xaxis of 1st ax must be hidden + _check_visible(axes[0].get_xticklabels(), visible=False) + _check_visible(axes[0].get_xticklabels(minor=True), visible=False) + 
_check_visible(axes[1].get_xticklabels(), visible=True) + _check_visible(axes[1].get_xticklabels(minor=True), visible=True) + + def test_df_subplots_patterns_minorticks_not_shared(self): + # GH 10657 + df = DataFrame( + np.random.default_rng(2).standard_normal((10, 2)), + index=date_range("1/1/2000", periods=10), + columns=list("AB"), + ) + # not shared + _, axes = plt.subplots(2, 1) + axes = df.plot(subplots=True, ax=axes) + for ax in axes: + assert len(ax.lines) == 1 + _check_visible(ax.get_yticklabels(), visible=True) + _check_visible(ax.get_xticklabels(), visible=True) + _check_visible(ax.get_xticklabels(minor=True), visible=True) + + def test_subplots_sharex_false(self): + # test when sharex is set to False, two plots should have different + # labels, GH 25160 + df = DataFrame(np.random.default_rng(2).random((10, 2))) + df.iloc[5:, 1] = np.nan + df.iloc[:5, 0] = np.nan + + _, axs = mpl.pyplot.subplots(2, 1) + df.plot.line(ax=axs, subplots=True, sharex=False) + + expected_ax1 = np.arange(4.5, 10, 0.5) + expected_ax2 = np.arange(-0.5, 5, 0.5) + + tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1) + tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2) + + def test_subplots_constrained_layout(self): + # GH 25261 + idx = date_range(start="now", periods=10) + df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx) + kwargs = {} + if hasattr(mpl.pyplot.Figure, "get_constrained_layout"): + kwargs["constrained_layout"] = True + _, axes = mpl.pyplot.subplots(2, **kwargs) + with tm.assert_produces_warning(None): + df.plot(ax=axes[0]) + with tm.ensure_clean(return_filelike=True) as path: + mpl.pyplot.savefig(path) + + @pytest.mark.parametrize( + "index_name, old_label, new_label", + [ + (None, "", "new"), + ("old", "old", "new"), + (None, "", ""), + (None, "", 1), + (None, "", [1, 2]), + ], + ) + @pytest.mark.parametrize("kind", ["line", "area", "bar"]) + def test_xlabel_ylabel_dataframe_subplots( + self, kind, index_name, old_label, 
new_label + ): + # GH 9093 + df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"]) + df.index.name = index_name + + # default is the ylabel is not shown and xlabel is index name + axes = df.plot(kind=kind, subplots=True) + assert all(ax.get_ylabel() == "" for ax in axes) + assert all(ax.get_xlabel() == old_label for ax in axes) + + # old xlabel will be overridden and assigned ylabel will be used as ylabel + axes = df.plot(kind=kind, ylabel=new_label, xlabel=new_label, subplots=True) + assert all(ax.get_ylabel() == str(new_label) for ax in axes) + assert all(ax.get_xlabel() == str(new_label) for ax in axes) + + @pytest.mark.parametrize( + "kwargs", + [ + # stacked center + {"kind": "bar", "stacked": True}, + {"kind": "bar", "stacked": True, "width": 0.9}, + {"kind": "barh", "stacked": True}, + {"kind": "barh", "stacked": True, "width": 0.9}, + # center + {"kind": "bar", "stacked": False}, + {"kind": "bar", "stacked": False, "width": 0.9}, + {"kind": "barh", "stacked": False}, + {"kind": "barh", "stacked": False, "width": 0.9}, + # subplots center + {"kind": "bar", "subplots": True}, + {"kind": "bar", "subplots": True, "width": 0.9}, + {"kind": "barh", "subplots": True}, + {"kind": "barh", "subplots": True, "width": 0.9}, + # align edge + {"kind": "bar", "stacked": True, "align": "edge"}, + {"kind": "bar", "stacked": True, "width": 0.9, "align": "edge"}, + {"kind": "barh", "stacked": True, "align": "edge"}, + {"kind": "barh", "stacked": True, "width": 0.9, "align": "edge"}, + {"kind": "bar", "stacked": False, "align": "edge"}, + {"kind": "bar", "stacked": False, "width": 0.9, "align": "edge"}, + {"kind": "barh", "stacked": False, "align": "edge"}, + {"kind": "barh", "stacked": False, "width": 0.9, "align": "edge"}, + {"kind": "bar", "subplots": True, "align": "edge"}, + {"kind": "bar", "subplots": True, "width": 0.9, "align": "edge"}, + {"kind": "barh", "subplots": True, "align": "edge"}, + {"kind": "barh", "subplots": True, "width": 0.9, "align": "edge"}, + 
], + ) + def test_bar_align_multiple_columns(self, kwargs): + # GH2157 + df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5)) + self._check_bar_alignment(df, **kwargs) + + @pytest.mark.parametrize( + "kwargs", + [ + {"kind": "bar", "stacked": False}, + {"kind": "bar", "stacked": True}, + {"kind": "barh", "stacked": False}, + {"kind": "barh", "stacked": True}, + {"kind": "bar", "subplots": True}, + {"kind": "barh", "subplots": True}, + ], + ) + def test_bar_align_single_column(self, kwargs): + df = DataFrame(np.random.default_rng(2).standard_normal(5)) + self._check_bar_alignment(df, **kwargs) + + @pytest.mark.parametrize( + "kwargs", + [ + {"kind": "bar", "stacked": False}, + {"kind": "bar", "stacked": True}, + {"kind": "barh", "stacked": False}, + {"kind": "barh", "stacked": True}, + {"kind": "bar", "subplots": True}, + {"kind": "barh", "subplots": True}, + ], + ) + def test_bar_barwidth_position(self, kwargs): + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + self._check_bar_alignment(df, width=0.9, position=0.2, **kwargs) + + @pytest.mark.parametrize("w", [1, 1.0]) + def test_bar_barwidth_position_int(self, w): + # GH 12979 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + ax = df.plot.bar(stacked=True, width=w) + ticks = ax.xaxis.get_ticklocs() + tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4])) + assert ax.get_xlim() == (-0.75, 4.75) + # check left-edge of bars + assert ax.patches[0].get_x() == -0.5 + assert ax.patches[-1].get_x() == 3.5 + + @pytest.mark.parametrize( + "kind, kwargs", + [ + ["bar", {"stacked": True}], + ["barh", {"stacked": False}], + ["barh", {"stacked": True}], + ["bar", {"subplots": True}], + ["barh", {"subplots": True}], + ], + ) + def test_bar_barwidth_position_int_width_1(self, kind, kwargs): + # GH 12979 + df = DataFrame(np.random.default_rng(2).standard_normal((5, 5))) + self._check_bar_alignment(df, kind=kind, width=1, **kwargs) + + def _check_bar_alignment( + self, + 
df, + kind="bar", + stacked=False, + subplots=False, + align="center", + width=0.5, + position=0.5, + ): + axes = df.plot( + kind=kind, + stacked=stacked, + subplots=subplots, + align=align, + width=width, + position=position, + grid=True, + ) + + axes = _flatten_visible(axes) + + for ax in axes: + if kind == "bar": + axis = ax.xaxis + ax_min, ax_max = ax.get_xlim() + min_edge = min(p.get_x() for p in ax.patches) + max_edge = max(p.get_x() + p.get_width() for p in ax.patches) + elif kind == "barh": + axis = ax.yaxis + ax_min, ax_max = ax.get_ylim() + min_edge = min(p.get_y() for p in ax.patches) + max_edge = max(p.get_y() + p.get_height() for p in ax.patches) + else: + raise ValueError + + # GH 7498 + # compare margins between lim and bar edges + tm.assert_almost_equal(ax_min, min_edge - 0.25) + tm.assert_almost_equal(ax_max, max_edge + 0.25) + + p = ax.patches[0] + if kind == "bar" and (stacked is True or subplots is True): + edge = p.get_x() + center = edge + p.get_width() * position + elif kind == "bar" and stacked is False: + center = p.get_x() + p.get_width() * len(df.columns) * position + edge = p.get_x() + elif kind == "barh" and (stacked is True or subplots is True): + center = p.get_y() + p.get_height() * position + edge = p.get_y() + elif kind == "barh" and stacked is False: + center = p.get_y() + p.get_height() * len(df.columns) * position + edge = p.get_y() + else: + raise ValueError + + # Check the ticks locates on integer + assert (axis.get_ticklocs() == np.arange(len(df))).all() + + if align == "center": + # Check whether the bar locates on center + tm.assert_almost_equal(axis.get_ticklocs()[0], center) + elif align == "edge": + # Check whether the bar's edge starts from the tick + tm.assert_almost_equal(axis.get_ticklocs()[0], edge) + else: + raise ValueError + + return axes diff --git a/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py 
b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py new file mode 100644 index 0000000000000000000000000000000000000000..a9250fa8347cc04fa34c28b016e1fb27d837284f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py @@ -0,0 +1,342 @@ +import re + +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.plotting.common import ( + _check_axes_shape, + _check_plot_works, + get_x_axis, + get_y_axis, +) + +pytest.importorskip("matplotlib") + + +@pytest.fixture +def hist_df(): + df = DataFrame( + np.random.default_rng(2).standard_normal((30, 2)), columns=["A", "B"] + ) + df["C"] = np.random.default_rng(2).choice(["a", "b", "c"], 30) + df["D"] = np.random.default_rng(2).choice(["a", "b", "c"], 30) + return df + + +class TestHistWithBy: + @pytest.mark.slow + @pytest.mark.parametrize( + "by, column, titles, legends", + [ + ("C", "A", ["a", "b", "c"], [["A"]] * 3), + ("C", ["A", "B"], ["a", "b", "c"], [["A", "B"]] * 3), + ("C", None, ["a", "b", "c"], [["A", "B"]] * 3), + ( + ["C", "D"], + "A", + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A"]] * 3, + ), + ( + ["C", "D"], + ["A", "B"], + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A", "B"]] * 3, + ), + ( + ["C", "D"], + None, + [ + "(a, a)", + "(b, b)", + "(c, c)", + ], + [["A", "B"]] * 3, + ), + ], + ) + def test_hist_plot_by_argument(self, by, column, titles, legends, hist_df): + # GH 15079 + axes = _check_plot_works( + hist_df.plot.hist, column=column, by=by, default_axes=True + ) + result_titles = [ax.get_title() for ax in axes] + result_legends = [ + [legend.get_text() for legend in ax.get_legend().texts] for ax in axes + ] + + assert result_legends == legends + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column, titles, legends", + [ + (0, "A", ["a", "b", "c"], [["A"]] * 3), + (0, None, ["a", "b", "c"], [["A", "B"]] * 3), + ( + [0, "D"], + "A", + [ + 
"(a, a)", + "(b, b)", + "(c, c)", + ], + [["A"]] * 3, + ), + ], + ) + def test_hist_plot_by_0(self, by, column, titles, legends, hist_df): + # GH 15079 + df = hist_df.copy() + df = df.rename(columns={"C": 0}) + + axes = _check_plot_works(df.plot.hist, default_axes=True, column=column, by=by) + result_titles = [ax.get_title() for ax in axes] + result_legends = [ + [legend.get_text() for legend in ax.get_legend().texts] for ax in axes + ] + + assert result_legends == legends + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column", + [ + ([], ["A"]), + ([], ["A", "B"]), + ((), None), + ((), ["A", "B"]), + ], + ) + def test_hist_plot_empty_list_string_tuple_by(self, by, column, hist_df): + # GH 15079 + msg = "No group keys passed" + with pytest.raises(ValueError, match=msg): + _check_plot_works( + hist_df.plot.hist, default_axes=True, column=column, by=by + ) + + @pytest.mark.slow + @pytest.mark.parametrize( + "by, column, layout, axes_num", + [ + (["C"], "A", (2, 2), 3), + ("C", "A", (2, 2), 3), + (["C"], ["A"], (1, 3), 3), + ("C", None, (3, 1), 3), + ("C", ["A", "B"], (3, 1), 3), + (["C", "D"], "A", (9, 1), 3), + (["C", "D"], "A", (3, 3), 3), + (["C", "D"], ["A"], (5, 2), 3), + (["C", "D"], ["A", "B"], (9, 1), 3), + (["C", "D"], None, (9, 1), 3), + (["C", "D"], ["A", "B"], (5, 2), 3), + ], + ) + def test_hist_plot_layout_with_by(self, by, column, layout, axes_num, hist_df): + # GH 15079 + # _check_plot_works adds an ax so catch warning. 
see GH #13188 + with tm.assert_produces_warning(UserWarning, check_stacklevel=False): + axes = _check_plot_works( + hist_df.plot.hist, column=column, by=by, layout=layout + ) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + @pytest.mark.parametrize( + "msg, by, layout", + [ + ("larger than required size", ["C", "D"], (1, 1)), + (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)), + ("At least one dimension of layout must be positive", "C", (-1, -1)), + ], + ) + def test_hist_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df): + # GH 15079, test if error is raised when invalid layout is given + + with pytest.raises(ValueError, match=msg): + hist_df.plot.hist(column=["A", "B"], by=by, layout=layout) + + @pytest.mark.slow + def test_axis_share_x_with_by(self, hist_df): + # GH 15079 + ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharex=True) + + # share x + assert get_x_axis(ax1).joined(ax1, ax2) + assert get_x_axis(ax2).joined(ax1, ax2) + assert get_x_axis(ax3).joined(ax1, ax3) + assert get_x_axis(ax3).joined(ax2, ax3) + + # don't share y + assert not get_y_axis(ax1).joined(ax1, ax2) + assert not get_y_axis(ax2).joined(ax1, ax2) + assert not get_y_axis(ax3).joined(ax1, ax3) + assert not get_y_axis(ax3).joined(ax2, ax3) + + @pytest.mark.slow + def test_axis_share_y_with_by(self, hist_df): + # GH 15079 + ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharey=True) + + # share y + assert get_y_axis(ax1).joined(ax1, ax2) + assert get_y_axis(ax2).joined(ax1, ax2) + assert get_y_axis(ax3).joined(ax1, ax3) + assert get_y_axis(ax3).joined(ax2, ax3) + + # don't share x + assert not get_x_axis(ax1).joined(ax1, ax2) + assert not get_x_axis(ax2).joined(ax1, ax2) + assert not get_x_axis(ax3).joined(ax1, ax3) + assert not get_x_axis(ax3).joined(ax2, ax3) + + @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)]) + def test_figure_shape_hist_with_by(self, figsize, hist_df): + # GH 15079 + axes = 
hist_df.plot.hist(column="A", by="C", figsize=figsize) + _check_axes_shape(axes, axes_num=3, figsize=figsize) + + +class TestBoxWithBy: + @pytest.mark.parametrize( + "by, column, titles, xticklabels", + [ + ("C", "A", ["A"], [["a", "b", "c"]]), + ( + ["C", "D"], + "A", + ["A"], + [ + [ + "(a, a)", + "(b, b)", + "(c, c)", + ] + ], + ), + ("C", ["A", "B"], ["A", "B"], [["a", "b", "c"]] * 2), + ( + ["C", "D"], + ["A", "B"], + ["A", "B"], + [ + [ + "(a, a)", + "(b, b)", + "(c, c)", + ] + ] + * 2, + ), + (["C"], None, ["A", "B"], [["a", "b", "c"]] * 2), + ], + ) + def test_box_plot_by_argument(self, by, column, titles, xticklabels, hist_df): + # GH 15079 + axes = _check_plot_works( + hist_df.plot.box, default_axes=True, column=column, by=by + ) + result_titles = [ax.get_title() for ax in axes] + result_xticklabels = [ + [label.get_text() for label in ax.get_xticklabels()] for ax in axes + ] + + assert result_xticklabels == xticklabels + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column, titles, xticklabels", + [ + (0, "A", ["A"], [["a", "b", "c"]]), + ( + [0, "D"], + "A", + ["A"], + [ + [ + "(a, a)", + "(b, b)", + "(c, c)", + ] + ], + ), + (0, None, ["A", "B"], [["a", "b", "c"]] * 2), + ], + ) + def test_box_plot_by_0(self, by, column, titles, xticklabels, hist_df): + # GH 15079 + df = hist_df.copy() + df = df.rename(columns={"C": 0}) + + axes = _check_plot_works(df.plot.box, default_axes=True, column=column, by=by) + result_titles = [ax.get_title() for ax in axes] + result_xticklabels = [ + [label.get_text() for label in ax.get_xticklabels()] for ax in axes + ] + + assert result_xticklabels == xticklabels + assert result_titles == titles + + @pytest.mark.parametrize( + "by, column", + [ + ([], ["A"]), + ((), "A"), + ([], None), + ((), ["A", "B"]), + ], + ) + def test_box_plot_with_none_empty_list_by(self, by, column, hist_df): + # GH 15079 + msg = "No group keys passed" + with pytest.raises(ValueError, match=msg): + 
_check_plot_works(hist_df.plot.box, default_axes=True, column=column, by=by) + + @pytest.mark.slow + @pytest.mark.parametrize( + "by, column, layout, axes_num", + [ + (["C"], "A", (1, 1), 1), + ("C", "A", (1, 1), 1), + ("C", None, (2, 1), 2), + ("C", ["A", "B"], (1, 2), 2), + (["C", "D"], "A", (1, 1), 1), + (["C", "D"], None, (1, 2), 2), + ], + ) + def test_box_plot_layout_with_by(self, by, column, layout, axes_num, hist_df): + # GH 15079 + axes = _check_plot_works( + hist_df.plot.box, default_axes=True, column=column, by=by, layout=layout + ) + _check_axes_shape(axes, axes_num=axes_num, layout=layout) + + @pytest.mark.parametrize( + "msg, by, layout", + [ + ("larger than required size", ["C", "D"], (1, 1)), + (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)), + ("At least one dimension of layout must be positive", "C", (-1, -1)), + ], + ) + def test_box_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df): + # GH 15079, test if error is raised when invalid layout is given + + with pytest.raises(ValueError, match=msg): + hist_df.plot.box(column=["A", "B"], by=by, layout=layout) + + @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)]) + def test_figure_shape_hist_with_by(self, figsize, hist_df): + # GH 15079 + axes = hist_df.plot.box(column="A", by="C", figsize=figsize) + _check_axes_shape(axes, axes_num=1, figsize=figsize) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99ee4266f67ea266b2f4c407045db855e4291d1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_na_scalar.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_na_scalar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69bb390c903ad96e27edc5114d047174a5f5be02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_na_scalar.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_nat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_nat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e033032450945985553269248ecb52079fc002e3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_nat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efeba62fe6faec23df123299b16054b86adb0004 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_arithmetic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_arithmetic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb4e80d0601ca72d0d4d4c5031b0c80295cd414f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_arithmetic.cpython-310.pyc differ diff 
--git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04882be48961ca2d010dff8683928e366546461b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_contains.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_contains.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55a98735d118c3a182a51ca8bc326ccb08be829f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_contains.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_formats.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_formats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f5b76b6daa4f3c0282789e00a2c58a99965381d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_formats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_interval.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_interval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c3e03378324bca898f7311d4316d5868be432a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_interval.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_overlaps.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_overlaps.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1bc51ef89b14318d2edcf1e2deca84a00158f40 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_overlaps.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..603763227cb888cb716692ddb82d206ba6812c90 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py @@ -0,0 +1,192 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) +import pandas._testing as tm + + +class TestIntervalArithmetic: + def test_interval_add(self, closed): + interval = Interval(0, 1, closed=closed) + expected = Interval(1, 2, closed=closed) + + result = interval + 1 + assert result == expected + + result = 1 + interval + assert result == expected + + result = interval + result += 1 + assert result == expected + + msg = r"unsupported operand type\(s\) for \+" + with pytest.raises(TypeError, match=msg): + interval + interval + + with pytest.raises(TypeError, match=msg): + interval + "foo" + + def test_interval_sub(self, closed): + interval = Interval(0, 1, closed=closed) + expected = Interval(-1, 0, closed=closed) + + result = interval - 1 + assert result == expected + + result = interval + result -= 1 + assert result == expected + + msg = r"unsupported operand type\(s\) for -" + with pytest.raises(TypeError, match=msg): + interval - interval + + with pytest.raises(TypeError, match=msg): + interval - "foo" + + def 
test_interval_mult(self, closed): + interval = Interval(0, 1, closed=closed) + expected = Interval(0, 2, closed=closed) + + result = interval * 2 + assert result == expected + + result = 2 * interval + assert result == expected + + result = interval + result *= 2 + assert result == expected + + msg = r"unsupported operand type\(s\) for \*" + with pytest.raises(TypeError, match=msg): + interval * interval + + msg = r"can\'t multiply sequence by non-int" + with pytest.raises(TypeError, match=msg): + interval * "foo" + + def test_interval_div(self, closed): + interval = Interval(0, 1, closed=closed) + expected = Interval(0, 0.5, closed=closed) + + result = interval / 2.0 + assert result == expected + + result = interval + result /= 2.0 + assert result == expected + + msg = r"unsupported operand type\(s\) for /" + with pytest.raises(TypeError, match=msg): + interval / interval + + with pytest.raises(TypeError, match=msg): + interval / "foo" + + def test_interval_floordiv(self, closed): + interval = Interval(1, 2, closed=closed) + expected = Interval(0, 1, closed=closed) + + result = interval // 2 + assert result == expected + + result = interval + result //= 2 + assert result == expected + + msg = r"unsupported operand type\(s\) for //" + with pytest.raises(TypeError, match=msg): + interval // interval + + with pytest.raises(TypeError, match=msg): + interval // "foo" + + @pytest.mark.parametrize("method", ["__add__", "__sub__"]) + @pytest.mark.parametrize( + "interval", + [ + Interval( + Timestamp("2017-01-01 00:00:00"), Timestamp("2018-01-01 00:00:00") + ), + Interval(Timedelta(days=7), Timedelta(days=14)), + ], + ) + @pytest.mark.parametrize( + "delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")] + ) + def test_time_interval_add_subtract_timedelta(self, interval, delta, method): + # https://github.com/pandas-dev/pandas/issues/32023 + result = getattr(interval, method)(delta) + left = getattr(interval.left, method)(delta) + right = 
getattr(interval.right, method)(delta) + expected = Interval(left, right) + + assert result == expected + + @pytest.mark.parametrize("interval", [Interval(1, 2), Interval(1.0, 2.0)]) + @pytest.mark.parametrize( + "delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")] + ) + def test_numeric_interval_add_timedelta_raises(self, interval, delta): + # https://github.com/pandas-dev/pandas/issues/32023 + msg = "|".join( + [ + "unsupported operand", + "cannot use operands", + "Only numeric, Timestamp and Timedelta endpoints are allowed", + ] + ) + with pytest.raises((TypeError, ValueError), match=msg): + interval + delta + + with pytest.raises((TypeError, ValueError), match=msg): + delta + interval + + @pytest.mark.parametrize("klass", [timedelta, np.timedelta64, Timedelta]) + def test_timedelta_add_timestamp_interval(self, klass): + delta = klass(0) + expected = Interval(Timestamp("2020-01-01"), Timestamp("2020-02-01")) + + result = delta + expected + assert result == expected + + result = expected + delta + assert result == expected + + +class TestIntervalComparisons: + def test_interval_equal(self): + assert Interval(0, 1) == Interval(0, 1, closed="right") + assert Interval(0, 1) != Interval(0, 1, closed="left") + assert Interval(0, 1) != 0 + + def test_interval_comparison(self): + msg = ( + "'<' not supported between instances of " + "'pandas._libs.interval.Interval' and 'int'" + ) + with pytest.raises(TypeError, match=msg): + Interval(0, 1) < 2 + + assert Interval(0, 1) < Interval(1, 2) + assert Interval(0, 1) < Interval(0, 2) + assert Interval(0, 1) < Interval(0.5, 1.5) + assert Interval(0, 1) <= Interval(0, 1) + assert Interval(0, 1) > Interval(-1, 2) + assert Interval(0, 1) >= Interval(0, 1) + + def test_equality_comparison_broadcasts_over_array(self): + # https://github.com/pandas-dev/pandas/issues/35931 + interval = Interval(0, 1) + arr = np.array([interval, interval]) + result = interval == arr + expected = np.array([True, True]) + 
tm.assert_numpy_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..a4bc00b923434f8d62c8332b3104696051fd287e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py @@ -0,0 +1,51 @@ +import pytest + +from pandas import ( + Interval, + Period, + Timestamp, +) + + +class TestIntervalConstructors: + @pytest.mark.parametrize( + "left, right", + [ + ("a", "z"), + (("a", "b"), ("c", "d")), + (list("AB"), list("ab")), + (Interval(0, 1), Interval(1, 2)), + (Period("2018Q1", freq="Q"), Period("2018Q1", freq="Q")), + ], + ) + def test_construct_errors(self, left, right): + # GH#23013 + msg = "Only numeric, Timestamp and Timedelta endpoints are allowed" + with pytest.raises(ValueError, match=msg): + Interval(left, right) + + def test_constructor_errors(self): + msg = "invalid option for 'closed': foo" + with pytest.raises(ValueError, match=msg): + Interval(0, 1, closed="foo") + + msg = "left side of interval must be <= right side" + with pytest.raises(ValueError, match=msg): + Interval(1, 0) + + @pytest.mark.parametrize( + "tz_left, tz_right", [(None, "UTC"), ("UTC", None), ("UTC", "US/Eastern")] + ) + def test_constructor_errors_tz(self, tz_left, tz_right): + # GH#18538 + left = Timestamp("2017-01-01", tz=tz_left) + right = Timestamp("2017-01-02", tz=tz_right) + + if tz_left is None or tz_right is None: + error = TypeError + msg = "Cannot compare tz-naive and tz-aware timestamps" + else: + error = ValueError + msg = "left and right must have the same time zone" + with pytest.raises(error, match=msg): + Interval(left, right) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py new 
file mode 100644 index 0000000000000000000000000000000000000000..8dfca117a658b2a163ef35699c903ad14a032062 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py @@ -0,0 +1,73 @@ +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) + + +class TestContains: + def test_contains(self): + interval = Interval(0, 1) + assert 0.5 in interval + assert 1 in interval + assert 0 not in interval + + interval_both = Interval(0, 1, "both") + assert 0 in interval_both + assert 1 in interval_both + + interval_neither = Interval(0, 1, closed="neither") + assert 0 not in interval_neither + assert 0.5 in interval_neither + assert 1 not in interval_neither + + def test_contains_interval(self, inclusive_endpoints_fixture): + interval1 = Interval(0, 1, "both") + interval2 = Interval(0, 1, inclusive_endpoints_fixture) + assert interval1 in interval1 + assert interval2 in interval2 + assert interval2 in interval1 + assert interval1 not in interval2 or inclusive_endpoints_fixture == "both" + + def test_contains_infinite_length(self): + interval1 = Interval(0, 1, "both") + interval2 = Interval(float("-inf"), float("inf"), "neither") + assert interval1 in interval2 + assert interval2 not in interval1 + + def test_contains_zero_length(self): + interval1 = Interval(0, 1, "both") + interval2 = Interval(-1, -1, "both") + interval3 = Interval(0.5, 0.5, "both") + assert interval2 not in interval1 + assert interval3 in interval1 + assert interval2 not in interval3 and interval3 not in interval2 + assert interval1 not in interval2 and interval1 not in interval3 + + @pytest.mark.parametrize( + "type1", + [ + (0, 1), + (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)), + (Timedelta("0h"), Timedelta("1h")), + ], + ) + @pytest.mark.parametrize( + "type2", + [ + (0, 1), + (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)), + (Timedelta("0h"), Timedelta("1h")), + ], + ) + def test_contains_mixed_types(self, type1, type2): + 
interval1 = Interval(*type1) + interval2 = Interval(*type2) + if type1 == type2: + assert interval1 in interval2 + else: + msg = "^'<=' not supported between instances of" + with pytest.raises(TypeError, match=msg): + interval1 in interval2 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..6bf7aa91df3cebc41712c611aaa3781b638009d1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py @@ -0,0 +1,11 @@ +from pandas import Interval + + +def test_interval_repr(): + interval = Interval(0, 1) + assert repr(interval) == "Interval(0, 1, closed='right')" + assert str(interval) == "(0, 1]" + + interval_left = Interval(0, 1, closed="left") + assert repr(interval_left) == "Interval(0, 1, closed='left')" + assert str(interval_left) == "[0, 1)" diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..91b31e82f9c524f87e2849360cfd44b2f77b0c9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py @@ -0,0 +1,87 @@ +import numpy as np +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) + + +@pytest.fixture +def interval(): + return Interval(0, 1) + + +class TestInterval: + def test_properties(self, interval): + assert interval.closed == "right" + assert interval.left == 0 + assert interval.right == 1 + assert interval.mid == 0.5 + + def test_hash(self, interval): + # should not raise + hash(interval) + + @pytest.mark.parametrize( + "left, right, expected", + [ + (0, 5, 5), + (-2, 5.5, 7.5), + (10, 10, 0), + (10, np.inf, np.inf), + (-np.inf, -5, np.inf), + (-np.inf, np.inf, np.inf), + 
(Timedelta("0 days"), Timedelta("5 days"), Timedelta("5 days")), + (Timedelta("10 days"), Timedelta("10 days"), Timedelta("0 days")), + (Timedelta("1h10min"), Timedelta("5h5min"), Timedelta("3h55min")), + (Timedelta("5s"), Timedelta("1h"), Timedelta("59min55s")), + ], + ) + def test_length(self, left, right, expected): + # GH 18789 + iv = Interval(left, right) + result = iv.length + assert result == expected + + @pytest.mark.parametrize( + "left, right, expected", + [ + ("2017-01-01", "2017-01-06", "5 days"), + ("2017-01-01", "2017-01-01 12:00:00", "12 hours"), + ("2017-01-01 12:00", "2017-01-01 12:00:00", "0 days"), + ("2017-01-01 12:01", "2017-01-05 17:31:00", "4 days 5 hours 30 min"), + ], + ) + @pytest.mark.parametrize("tz", (None, "UTC", "CET", "US/Eastern")) + def test_length_timestamp(self, tz, left, right, expected): + # GH 18789 + iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz)) + result = iv.length + expected = Timedelta(expected) + assert result == expected + + @pytest.mark.parametrize( + "left, right", + [ + (0, 1), + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timestamp("2018-01-02")), + ( + Timestamp("2018-01-01", tz="US/Eastern"), + Timestamp("2018-01-02", tz="US/Eastern"), + ), + ], + ) + def test_is_empty(self, left, right, closed): + # GH27219 + # non-empty always return False + iv = Interval(left, right, closed) + assert iv.is_empty is False + + # same endpoint is empty except when closed='both' (contains one point) + iv = Interval(left, left, closed) + result = iv.is_empty + expected = closed != "both" + assert result is expected diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py new file mode 100644 index 0000000000000000000000000000000000000000..7fcf59d7bb4afc0077884de68dc335aff25c2cc5 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py @@ -0,0 +1,67 @@ +import pytest + +from pandas import ( + Interval, + Timedelta, + Timestamp, +) + + +@pytest.fixture( + params=[ + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timedelta("1 day")), + (0, 1), + ], + ids=lambda x: type(x[0]).__name__, +) +def start_shift(request): + """ + Fixture for generating intervals of types from a start value and a shift + value that can be added to start to generate an endpoint + """ + return request.param + + +class TestOverlaps: + def test_overlaps_self(self, start_shift, closed): + start, shift = start_shift + interval = Interval(start, start + shift, closed) + assert interval.overlaps(interval) + + def test_overlaps_nested(self, start_shift, closed, other_closed): + start, shift = start_shift + interval1 = Interval(start, start + 3 * shift, other_closed) + interval2 = Interval(start + shift, start + 2 * shift, closed) + + # nested intervals should always overlap + assert interval1.overlaps(interval2) + + def test_overlaps_disjoint(self, start_shift, closed, other_closed): + start, shift = start_shift + interval1 = Interval(start, start + shift, other_closed) + interval2 = Interval(start + 2 * shift, start + 3 * shift, closed) + + # disjoint intervals should never overlap + assert not interval1.overlaps(interval2) + + def test_overlaps_endpoint(self, start_shift, closed, other_closed): + start, shift = start_shift + interval1 = Interval(start, start + shift, other_closed) + interval2 = Interval(start + shift, start + 2 * shift, closed) + + # overlap if shared endpoint is closed for both (overlap at a point) + result = interval1.overlaps(interval2) + expected = interval1.closed_right and interval2.closed_left + assert result == expected + + @pytest.mark.parametrize( + "other", + [10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")], + ids=lambda x: type(x).__name__, + ) + def test_overlaps_invalid_type(self, 
other): + interval = Interval(0, 1) + msg = f"`other` must be an Interval, got {type(other).__name__}" + with pytest.raises(TypeError, match=msg): + interval.overlaps(other) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6645f5be59341559393d5d504b506a5fdd583ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_arithmetic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_arithmetic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a15285903b9a3af222c6dfb133c8fd19733df21 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_arithmetic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..523830741a72c19184ae1228a6bde6cfe7acf5e4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_constructors.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..008b21d96e5c46b2ae87ab40bb2d5f9cd27ee3cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_formats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..058da211b7aa3da6c89b7012cd65b092752ee844 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/__pycache__/test_timedelta.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..138f46ecef31dde1cea7eb8f9d6e517da9f13c0f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_as_unit.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_as_unit.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..4cc8ca0dddceff9703f78c0c5db279a4bf4c348c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_as_unit.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_round.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_round.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e798ac5dae7f9a31b325229d890921a268b0088d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/__pycache__/test_round.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_as_unit.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_as_unit.py new file mode 100644 index 0000000000000000000000000000000000000000..8660141e5a5372d4bb8e921bc8b3a5ee148e8900 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_as_unit.py @@ -0,0 +1,80 @@ +import pytest + +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas.errors import OutOfBoundsTimedelta + +from pandas import Timedelta + + +class TestAsUnit: + def test_as_unit(self): + td = Timedelta(days=1) + + assert td.as_unit("ns") is td + + res = td.as_unit("us") + assert res._value == td._value // 1000 + assert res._creso == NpyDatetimeUnit.NPY_FR_us.value + + rt = res.as_unit("ns") + assert rt._value == td._value + assert rt._creso == td._creso + + res = td.as_unit("ms") + assert res._value == td._value // 1_000_000 + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value + + rt = res.as_unit("ns") + assert rt._value == td._value + assert rt._creso == td._creso + + res = td.as_unit("s") + assert res._value == td._value // 1_000_000_000 + assert res._creso == NpyDatetimeUnit.NPY_FR_s.value + + rt = 
res.as_unit("ns") + assert rt._value == td._value + assert rt._creso == td._creso + + def test_as_unit_overflows(self): + # microsecond that would be just out of bounds for nano + us = 9223372800000000 + td = Timedelta._from_value_and_reso(us, NpyDatetimeUnit.NPY_FR_us.value) + + msg = "Cannot cast 106752 days 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td.as_unit("ns") + + res = td.as_unit("ms") + assert res._value == us // 1000 + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value + + def test_as_unit_rounding(self): + td = Timedelta(microseconds=1500) + res = td.as_unit("ms") + + expected = Timedelta(milliseconds=1) + assert res == expected + + assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value + assert res._value == 1 + + with pytest.raises(ValueError, match="Cannot losslessly convert units"): + td.as_unit("ms", round_ok=False) + + def test_as_unit_non_nano(self): + # case where we are going neither to nor from nano + td = Timedelta(days=1).as_unit("ms") + assert td.days == 1 + assert td._value == 86_400_000 + assert td.components.days == 1 + assert td._d == 1 + assert td.total_seconds() == 86400 + + res = td.as_unit("us") + assert res._value == 86_400_000_000 + assert res.components.days == 1 + assert res.components.hours == 0 + assert res._d == 1 + assert res._h == 0 + assert res.total_seconds() == 86400 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_round.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_round.py new file mode 100644 index 0000000000000000000000000000000000000000..e54adb27d126bf13b454c513cecf847cdbb623bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/methods/test_round.py @@ -0,0 +1,187 @@ +from hypothesis import ( + given, + strategies as st, +) +import numpy as np +import pytest + +from pandas._libs import lib +from pandas._libs.tslibs import iNaT +from pandas.errors import 
OutOfBoundsTimedelta + +from pandas import Timedelta + + +class TestTimedeltaRound: + @pytest.mark.parametrize( + "freq,s1,s2", + [ + # This first case has s1, s2 being the same as t1,t2 below + ( + "ns", + Timedelta("1 days 02:34:56.789123456"), + Timedelta("-1 days 02:34:56.789123456"), + ), + ( + "us", + Timedelta("1 days 02:34:56.789123000"), + Timedelta("-1 days 02:34:56.789123000"), + ), + ( + "ms", + Timedelta("1 days 02:34:56.789000000"), + Timedelta("-1 days 02:34:56.789000000"), + ), + ("s", Timedelta("1 days 02:34:57"), Timedelta("-1 days 02:34:57")), + ("2s", Timedelta("1 days 02:34:56"), Timedelta("-1 days 02:34:56")), + ("5s", Timedelta("1 days 02:34:55"), Timedelta("-1 days 02:34:55")), + ("min", Timedelta("1 days 02:35:00"), Timedelta("-1 days 02:35:00")), + ("12min", Timedelta("1 days 02:36:00"), Timedelta("-1 days 02:36:00")), + ("h", Timedelta("1 days 03:00:00"), Timedelta("-1 days 03:00:00")), + ("d", Timedelta("1 days"), Timedelta("-1 days")), + ], + ) + def test_round(self, freq, s1, s2): + t1 = Timedelta("1 days 02:34:56.789123456") + t2 = Timedelta("-1 days 02:34:56.789123456") + + r1 = t1.round(freq) + assert r1 == s1 + r2 = t2.round(freq) + assert r2 == s2 + + def test_round_invalid(self): + t1 = Timedelta("1 days 02:34:56.789123456") + + for freq, msg in [ + ("YE", " is a non-fixed frequency"), + ("ME", " is a non-fixed frequency"), + ("foobar", "Invalid frequency: foobar"), + ]: + with pytest.raises(ValueError, match=msg): + t1.round(freq) + + @pytest.mark.skip_ubsan + def test_round_implementation_bounds(self): + # See also: analogous test for Timestamp + # GH#38964 + result = Timedelta.min.ceil("s") + expected = Timedelta.min + Timedelta(seconds=1) - Timedelta(145224193) + assert result == expected + + result = Timedelta.max.floor("s") + expected = Timedelta.max - Timedelta(854775807) + assert result == expected + + msg = ( + r"Cannot round -106752 days \+00:12:43.145224193 to freq=s without overflow" + ) + with 
pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta.min.floor("s") + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta.min.round("s") + + msg = "Cannot round 106751 days 23:47:16.854775807 to freq=s without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta.max.ceil("s") + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta.max.round("s") + + @pytest.mark.skip_ubsan + @given(val=st.integers(min_value=iNaT + 1, max_value=lib.i8max)) + @pytest.mark.parametrize( + "method", [Timedelta.round, Timedelta.floor, Timedelta.ceil] + ) + def test_round_sanity(self, val, method): + cls = Timedelta + err_cls = OutOfBoundsTimedelta + + val = np.int64(val) + td = cls(val) + + def checker(ts, nanos, unit): + # First check that we do raise in cases where we should + if nanos == 1: + pass + else: + div, mod = divmod(ts._value, nanos) + diff = int(nanos - mod) + lb = ts._value - mod + assert lb <= ts._value # i.e. no overflows with python ints + ub = ts._value + diff + assert ub > ts._value # i.e. 
no overflows with python ints + + msg = "without overflow" + if mod == 0: + # We should never be raising in this + pass + elif method is cls.ceil: + if ub > cls.max._value: + with pytest.raises(err_cls, match=msg): + method(ts, unit) + return + elif method is cls.floor: + if lb < cls.min._value: + with pytest.raises(err_cls, match=msg): + method(ts, unit) + return + elif mod >= diff: + if ub > cls.max._value: + with pytest.raises(err_cls, match=msg): + method(ts, unit) + return + elif lb < cls.min._value: + with pytest.raises(err_cls, match=msg): + method(ts, unit) + return + + res = method(ts, unit) + + td = res - ts + diff = abs(td._value) + assert diff < nanos + assert res._value % nanos == 0 + + if method is cls.round: + assert diff <= nanos / 2 + elif method is cls.floor: + assert res <= ts + elif method is cls.ceil: + assert res >= ts + + nanos = 1 + checker(td, nanos, "ns") + + nanos = 1000 + checker(td, nanos, "us") + + nanos = 1_000_000 + checker(td, nanos, "ms") + + nanos = 1_000_000_000 + checker(td, nanos, "s") + + nanos = 60 * 1_000_000_000 + checker(td, nanos, "min") + + nanos = 60 * 60 * 1_000_000_000 + checker(td, nanos, "h") + + nanos = 24 * 60 * 60 * 1_000_000_000 + checker(td, nanos, "D") + + @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) + def test_round_non_nano(self, unit): + td = Timedelta("1 days 02:34:57").as_unit(unit) + + res = td.round("min") + assert res == Timedelta("1 days 02:35:00") + assert res._creso == td._creso + + res = td.floor("min") + assert res == Timedelta("1 days 02:34:00") + assert res._creso == td._creso + + res = td.ceil("min") + assert res == Timedelta("1 days 02:35:00") + assert res._creso == td._creso diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_arithmetic.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..d2fa0f722ca6fa9d8c671896c68032799c0f1166 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_arithmetic.py @@ -0,0 +1,1182 @@ +""" +Tests for scalar Timedelta arithmetic ops +""" +from datetime import ( + datetime, + timedelta, +) +import operator + +import numpy as np +import pytest + +from pandas.errors import OutOfBoundsTimedelta + +import pandas as pd +from pandas import ( + NaT, + Timedelta, + Timestamp, + offsets, +) +import pandas._testing as tm +from pandas.core import ops + + +class TestTimedeltaAdditionSubtraction: + """ + Tests for Timedelta methods: + + __add__, __radd__, + __sub__, __rsub__ + """ + + @pytest.mark.parametrize( + "ten_seconds", + [ + Timedelta(10, unit="s"), + timedelta(seconds=10), + np.timedelta64(10, "s"), + np.timedelta64(10000000000, "ns"), + offsets.Second(10), + ], + ) + def test_td_add_sub_ten_seconds(self, ten_seconds): + # GH#6808 + base = Timestamp("20130101 09:01:12.123456") + expected_add = Timestamp("20130101 09:01:22.123456") + expected_sub = Timestamp("20130101 09:01:02.123456") + + result = base + ten_seconds + assert result == expected_add + + result = base - ten_seconds + assert result == expected_sub + + @pytest.mark.parametrize( + "one_day_ten_secs", + [ + Timedelta("1 day, 00:00:10"), + Timedelta("1 days, 00:00:10"), + timedelta(days=1, seconds=10), + np.timedelta64(1, "D") + np.timedelta64(10, "s"), + offsets.Day() + offsets.Second(10), + ], + ) + def test_td_add_sub_one_day_ten_seconds(self, one_day_ten_secs): + # GH#6808 + base = Timestamp("20130102 09:01:12.123456") + expected_add = Timestamp("20130103 09:01:22.123456") + expected_sub = Timestamp("20130101 09:01:02.123456") + + result = base + one_day_ten_secs + assert result == expected_add + + result = base - one_day_ten_secs + assert result == expected_sub + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_datetimelike_scalar(self, op): + # GH#19738 + td = Timedelta(10, unit="d") + + result = op(td, datetime(2016, 1, 1)) + if op is operator.add: + # 
datetime + Timedelta does _not_ call Timedelta.__radd__, + # so we get a datetime back instead of a Timestamp + assert isinstance(result, Timestamp) + assert result == Timestamp(2016, 1, 11) + + result = op(td, Timestamp("2018-01-12 18:09")) + assert isinstance(result, Timestamp) + assert result == Timestamp("2018-01-22 18:09") + + result = op(td, np.datetime64("2018-01-12")) + assert isinstance(result, Timestamp) + assert result == Timestamp("2018-01-22") + + result = op(td, NaT) + assert result is NaT + + def test_td_add_timestamp_overflow(self): + ts = Timestamp("1700-01-01").as_unit("ns") + msg = "Cannot cast 259987 from D to 'ns' without overflow." + with pytest.raises(OutOfBoundsTimedelta, match=msg): + ts + Timedelta(13 * 19999, unit="D") + + msg = "Cannot cast 259987 days 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + ts + timedelta(days=13 * 19999) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_td(self, op): + td = Timedelta(10, unit="d") + + result = op(td, Timedelta(days=10)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=20) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_pytimedelta(self, op): + td = Timedelta(10, unit="d") + result = op(td, timedelta(days=9)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=19) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_timedelta64(self, op): + td = Timedelta(10, unit="d") + result = op(td, np.timedelta64(-4, "D")) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=6) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_offset(self, op): + td = Timedelta(10, unit="d") + + result = op(td, offsets.Hour(6)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=10, hours=6) + + def test_td_sub_td(self): + td = Timedelta(10, unit="d") + expected = Timedelta(0, 
unit="ns") + result = td - td + assert isinstance(result, Timedelta) + assert result == expected + + def test_td_sub_pytimedelta(self): + td = Timedelta(10, unit="d") + expected = Timedelta(0, unit="ns") + + result = td - td.to_pytimedelta() + assert isinstance(result, Timedelta) + assert result == expected + + result = td.to_pytimedelta() - td + assert isinstance(result, Timedelta) + assert result == expected + + def test_td_sub_timedelta64(self): + td = Timedelta(10, unit="d") + expected = Timedelta(0, unit="ns") + + result = td - td.to_timedelta64() + assert isinstance(result, Timedelta) + assert result == expected + + result = td.to_timedelta64() - td + assert isinstance(result, Timedelta) + assert result == expected + + def test_td_sub_nat(self): + # In this context pd.NaT is treated as timedelta-like + td = Timedelta(10, unit="d") + result = td - NaT + assert result is NaT + + def test_td_sub_td64_nat(self): + td = Timedelta(10, unit="d") + td_nat = np.timedelta64("NaT") + + result = td - td_nat + assert result is NaT + + result = td_nat - td + assert result is NaT + + def test_td_sub_offset(self): + td = Timedelta(10, unit="d") + result = td - offsets.Hour(1) + assert isinstance(result, Timedelta) + assert result == Timedelta(239, unit="h") + + def test_td_add_sub_numeric_raises(self): + td = Timedelta(10, unit="d") + msg = "unsupported operand type" + for other in [2, 2.0, np.int64(2), np.float64(2)]: + with pytest.raises(TypeError, match=msg): + td + other + with pytest.raises(TypeError, match=msg): + other + td + with pytest.raises(TypeError, match=msg): + td - other + with pytest.raises(TypeError, match=msg): + other - td + + def test_td_add_sub_int_ndarray(self): + td = Timedelta("1 day") + other = np.array([1]) + + msg = r"unsupported operand type\(s\) for \+: 'Timedelta' and 'int'" + with pytest.raises(TypeError, match=msg): + td + np.array([1]) + + msg = "|".join( + [ + ( + r"unsupported operand type\(s\) for \+: 'numpy.ndarray' " + "and 'Timedelta'" 
+ ), + # This message goes on to say "Please do not rely on this error; + # it may not be given on all Python implementations" + "Concatenation operation is not implemented for NumPy arrays", + ] + ) + with pytest.raises(TypeError, match=msg): + other + td + msg = r"unsupported operand type\(s\) for -: 'Timedelta' and 'int'" + with pytest.raises(TypeError, match=msg): + td - other + msg = r"unsupported operand type\(s\) for -: 'numpy.ndarray' and 'Timedelta'" + with pytest.raises(TypeError, match=msg): + other - td + + def test_td_rsub_nat(self): + td = Timedelta(10, unit="d") + result = NaT - td + assert result is NaT + + result = np.datetime64("NaT") - td + assert result is NaT + + def test_td_rsub_offset(self): + result = offsets.Hour(1) - Timedelta(10, unit="d") + assert isinstance(result, Timedelta) + assert result == Timedelta(-239, unit="h") + + def test_td_sub_timedeltalike_object_dtype_array(self): + # GH#21980 + arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]) + exp = np.array([Timestamp("20121231 9:01"), Timestamp("20121229 9:02")]) + res = arr - Timedelta("1D") + tm.assert_numpy_array_equal(res, exp) + + def test_td_sub_mixed_most_timedeltalike_object_dtype_array(self): + # GH#21980 + now = Timestamp("2021-11-09 09:54:00") + arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")]) + exp = np.array( + [ + now - Timedelta("1D"), + Timedelta("0D"), + np.timedelta64(2, "h") - Timedelta("1D"), + ] + ) + res = arr - Timedelta("1D") + tm.assert_numpy_array_equal(res, exp) + + def test_td_rsub_mixed_most_timedeltalike_object_dtype_array(self): + # GH#21980 + now = Timestamp("2021-11-09 09:54:00") + arr = np.array([now, Timedelta("1D"), np.timedelta64(2, "h")]) + msg = r"unsupported operand type\(s\) for \-: 'Timedelta' and 'Timestamp'" + with pytest.raises(TypeError, match=msg): + Timedelta("1D") - arr + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_timedeltalike_object_dtype_array(self, op): + # 
GH#21980 + arr = np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]) + exp = np.array([Timestamp("20130102 9:01"), Timestamp("20121231 9:02")]) + res = op(arr, Timedelta("1D")) + tm.assert_numpy_array_equal(res, exp) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_td_add_mixed_timedeltalike_object_dtype_array(self, op): + # GH#21980 + now = Timestamp("2021-11-09 09:54:00") + arr = np.array([now, Timedelta("1D")]) + exp = np.array([now + Timedelta("1D"), Timedelta("2D")]) + res = op(arr, Timedelta("1D")) + tm.assert_numpy_array_equal(res, exp) + + def test_td_add_sub_td64_ndarray(self): + td = Timedelta("1 day") + + other = np.array([td.to_timedelta64()]) + expected = np.array([Timedelta("2 Days").to_timedelta64()]) + + result = td + other + tm.assert_numpy_array_equal(result, expected) + result = other + td + tm.assert_numpy_array_equal(result, expected) + + result = td - other + tm.assert_numpy_array_equal(result, expected * 0) + result = other - td + tm.assert_numpy_array_equal(result, expected * 0) + + def test_td_add_sub_dt64_ndarray(self): + td = Timedelta("1 day") + other = np.array(["2000-01-01"], dtype="M8[ns]") + + expected = np.array(["2000-01-02"], dtype="M8[ns]") + tm.assert_numpy_array_equal(td + other, expected) + tm.assert_numpy_array_equal(other + td, expected) + + expected = np.array(["1999-12-31"], dtype="M8[ns]") + tm.assert_numpy_array_equal(-td + other, expected) + tm.assert_numpy_array_equal(other - td, expected) + + def test_td_add_sub_ndarray_0d(self): + td = Timedelta("1 day") + other = np.array(td.asm8) + + result = td + other + assert isinstance(result, Timedelta) + assert result == 2 * td + + result = other + td + assert isinstance(result, Timedelta) + assert result == 2 * td + + result = other - td + assert isinstance(result, Timedelta) + assert result == 0 * td + + result = td - other + assert isinstance(result, Timedelta) + assert result == 0 * td + + +class TestTimedeltaMultiplicationDivision: + 
""" + Tests for Timedelta methods: + + __mul__, __rmul__, + __div__, __rdiv__, + __truediv__, __rtruediv__, + __floordiv__, __rfloordiv__, + __mod__, __rmod__, + __divmod__, __rdivmod__ + """ + + # --------------------------------------------------------------- + # Timedelta.__mul__, __rmul__ + + @pytest.mark.parametrize( + "td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")] + ) + @pytest.mark.parametrize("op", [operator.mul, ops.rmul]) + def test_td_mul_nat(self, op, td_nat): + # GH#19819 + td = Timedelta(10, unit="d") + typs = "|".join(["numpy.timedelta64", "NaTType", "Timedelta"]) + msg = "|".join( + [ + rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'", + r"ufunc '?multiply'? cannot use operands with types", + ] + ) + with pytest.raises(TypeError, match=msg): + op(td, td_nat) + + @pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")]) + @pytest.mark.parametrize("op", [operator.mul, ops.rmul]) + def test_td_mul_nan(self, op, nan): + # np.float64('NaN') has a 'dtype' attr, avoid treating as array + td = Timedelta(10, unit="d") + result = op(td, nan) + assert result is NaT + + @pytest.mark.parametrize("op", [operator.mul, ops.rmul]) + def test_td_mul_scalar(self, op): + # GH#19738 + td = Timedelta(minutes=3) + + result = op(td, 2) + assert result == Timedelta(minutes=6) + + result = op(td, 1.5) + assert result == Timedelta(minutes=4, seconds=30) + + assert op(td, np.nan) is NaT + + assert op(-1, td)._value == -1 * td._value + assert op(-1.0, td)._value == -1.0 * td._value + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + # timedelta * datetime is gibberish + op(td, Timestamp(2016, 1, 2)) + + with pytest.raises(TypeError, match=msg): + # invalid multiply with another timedelta + op(td, td) + + def test_td_mul_numeric_ndarray(self): + td = Timedelta("1 day") + other = np.array([2]) + expected = np.array([Timedelta("2 Days").to_timedelta64()]) + + result = td * other + 
tm.assert_numpy_array_equal(result, expected) + + result = other * td + tm.assert_numpy_array_equal(result, expected) + + def test_td_mul_numeric_ndarray_0d(self): + td = Timedelta("1 day") + other = np.array(2) + assert other.ndim == 0 + expected = Timedelta("2 days") + + res = td * other + assert type(res) is Timedelta + assert res == expected + + res = other * td + assert type(res) is Timedelta + assert res == expected + + def test_td_mul_td64_ndarray_invalid(self): + td = Timedelta("1 day") + other = np.array([Timedelta("2 Days").to_timedelta64()]) + + msg = ( + "ufunc '?multiply'? cannot use operands with types " + rf"dtype\('{tm.ENDIAN}m8\[ns\]'\) and dtype\('{tm.ENDIAN}m8\[ns\]'\)" + ) + with pytest.raises(TypeError, match=msg): + td * other + with pytest.raises(TypeError, match=msg): + other * td + + # --------------------------------------------------------------- + # Timedelta.__div__, __truediv__ + + def test_td_div_timedeltalike_scalar(self): + # GH#19738 + td = Timedelta(10, unit="d") + + result = td / offsets.Hour(1) + assert result == 240 + + assert td / td == 1 + assert td / np.timedelta64(60, "h") == 4 + + assert np.isnan(td / NaT) + + def test_td_div_td64_non_nano(self): + # truediv + td = Timedelta("1 days 2 hours 3 ns") + result = td / np.timedelta64(1, "D") + assert result == td._value / (86400 * 10**9) + result = td / np.timedelta64(1, "s") + assert result == td._value / 10**9 + result = td / np.timedelta64(1, "ns") + assert result == td._value + + # floordiv + td = Timedelta("1 days 2 hours 3 ns") + result = td // np.timedelta64(1, "D") + assert result == 1 + result = td // np.timedelta64(1, "s") + assert result == 93600 + result = td // np.timedelta64(1, "ns") + assert result == td._value + + def test_td_div_numeric_scalar(self): + # GH#19738 + td = Timedelta(10, unit="d") + + result = td / 2 + assert isinstance(result, Timedelta) + assert result == Timedelta(days=5) + + result = td / 5 + assert isinstance(result, Timedelta) + assert result 
== Timedelta(days=2) + + @pytest.mark.parametrize( + "nan", + [ + np.nan, + np.float64("NaN"), + float("nan"), + ], + ) + def test_td_div_nan(self, nan): + # np.float64('NaN') has a 'dtype' attr, avoid treating as array + td = Timedelta(10, unit="d") + result = td / nan + assert result is NaT + + result = td // nan + assert result is NaT + + def test_td_div_td64_ndarray(self): + td = Timedelta("1 day") + + other = np.array([Timedelta("2 Days").to_timedelta64()]) + expected = np.array([0.5]) + + result = td / other + tm.assert_numpy_array_equal(result, expected) + + result = other / td + tm.assert_numpy_array_equal(result, expected * 4) + + def test_td_div_ndarray_0d(self): + td = Timedelta("1 day") + + other = np.array(1) + res = td / other + assert isinstance(res, Timedelta) + assert res == td + + # --------------------------------------------------------------- + # Timedelta.__rdiv__ + + def test_td_rdiv_timedeltalike_scalar(self): + # GH#19738 + td = Timedelta(10, unit="d") + result = offsets.Hour(1) / td + assert result == 1 / 240.0 + + assert np.timedelta64(60, "h") / td == 0.25 + + def test_td_rdiv_na_scalar(self): + # GH#31869 None gets cast to NaT + td = Timedelta(10, unit="d") + + result = NaT / td + assert np.isnan(result) + + result = None / td + assert np.isnan(result) + + result = np.timedelta64("NaT") / td + assert np.isnan(result) + + msg = r"unsupported operand type\(s\) for /: 'numpy.datetime64' and 'Timedelta'" + with pytest.raises(TypeError, match=msg): + np.datetime64("NaT") / td + + msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'" + with pytest.raises(TypeError, match=msg): + np.nan / td + + def test_td_rdiv_ndarray(self): + td = Timedelta(10, unit="d") + + arr = np.array([td], dtype=object) + result = arr / td + expected = np.array([1], dtype=np.float64) + tm.assert_numpy_array_equal(result, expected) + + arr = np.array([None]) + result = arr / td + expected = np.array([np.nan]) + tm.assert_numpy_array_equal(result, 
expected) + + arr = np.array([np.nan], dtype=object) + msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'" + with pytest.raises(TypeError, match=msg): + arr / td + + arr = np.array([np.nan], dtype=np.float64) + msg = "cannot use operands with types dtype" + with pytest.raises(TypeError, match=msg): + arr / td + + def test_td_rdiv_ndarray_0d(self): + td = Timedelta(10, unit="d") + + arr = np.array(td.asm8) + + assert arr / td == 1 + + # --------------------------------------------------------------- + # Timedelta.__floordiv__ + + def test_td_floordiv_timedeltalike_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + scalar = Timedelta(hours=3, minutes=3) + + assert td // scalar == 1 + assert -td // scalar.to_pytimedelta() == -2 + assert (2 * td) // scalar.to_timedelta64() == 2 + + def test_td_floordiv_null_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + + assert td // np.nan is NaT + assert np.isnan(td // NaT) + assert np.isnan(td // np.timedelta64("NaT")) + + def test_td_floordiv_offsets(self): + # GH#19738 + td = Timedelta(hours=3, minutes=4) + assert td // offsets.Hour(1) == 3 + assert td // offsets.Minute(2) == 92 + + def test_td_floordiv_invalid_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + + msg = "|".join( + [ + r"Invalid dtype datetime64\[D\] for __floordiv__", + "'dtype' is an invalid keyword argument for this function", + r"ufunc '?floor_divide'? 
cannot use operands with types", + ] + ) + with pytest.raises(TypeError, match=msg): + td // np.datetime64("2016-01-01", dtype="datetime64[us]") + + def test_td_floordiv_numeric_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + + expected = Timedelta(hours=1, minutes=32) + assert td // 2 == expected + assert td // 2.0 == expected + assert td // np.float64(2.0) == expected + assert td // np.int32(2.0) == expected + assert td // np.uint8(2.0) == expected + + def test_td_floordiv_timedeltalike_array(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + scalar = Timedelta(hours=3, minutes=3) + + # Array-like others + assert td // np.array(scalar.to_timedelta64()) == 1 + + res = (3 * td) // np.array([scalar.to_timedelta64()]) + expected = np.array([3], dtype=np.int64) + tm.assert_numpy_array_equal(res, expected) + + res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")]) + expected = np.array([10, np.nan]) + tm.assert_numpy_array_equal(res, expected) + + def test_td_floordiv_numeric_series(self): + # GH#18846 + td = Timedelta(hours=3, minutes=4) + ser = pd.Series([1], dtype=np.int64) + res = td // ser + assert res.dtype.kind == "m" + + # --------------------------------------------------------------- + # Timedelta.__rfloordiv__ + + def test_td_rfloordiv_timedeltalike_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + scalar = Timedelta(hours=3, minutes=4) + + # scalar others + # x // Timedelta is defined only for timedelta-like x. int-like, + # float-like, and date-like, in particular, should all either + # a) raise TypeError directly or + # b) return NotImplemented, following which the reversed + # operation will raise TypeError. 
+ assert td.__rfloordiv__(scalar) == 1 + assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2 + assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0 + + def test_td_rfloordiv_null_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + + assert np.isnan(td.__rfloordiv__(NaT)) + assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT"))) + + def test_td_rfloordiv_offsets(self): + # GH#19738 + assert offsets.Hour(1) // Timedelta(minutes=25) == 2 + + def test_td_rfloordiv_invalid_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + + dt64 = np.datetime64("2016-01-01", "us") + + assert td.__rfloordiv__(dt64) is NotImplemented + + msg = ( + r"unsupported operand type\(s\) for //: 'numpy.datetime64' and 'Timedelta'" + ) + with pytest.raises(TypeError, match=msg): + dt64 // td + + def test_td_rfloordiv_numeric_scalar(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + + assert td.__rfloordiv__(np.nan) is NotImplemented + assert td.__rfloordiv__(3.5) is NotImplemented + assert td.__rfloordiv__(2) is NotImplemented + assert td.__rfloordiv__(np.float64(2.0)) is NotImplemented + assert td.__rfloordiv__(np.uint8(9)) is NotImplemented + assert td.__rfloordiv__(np.int32(2.0)) is NotImplemented + + msg = r"unsupported operand type\(s\) for //: '.*' and 'Timedelta" + with pytest.raises(TypeError, match=msg): + np.float64(2.0) // td + with pytest.raises(TypeError, match=msg): + np.uint8(9) // td + with pytest.raises(TypeError, match=msg): + # deprecated GH#19761, enforced GH#29797 + np.int32(2.0) // td + + def test_td_rfloordiv_timedeltalike_array(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + scalar = Timedelta(hours=3, minutes=4) + + # Array-like others + assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1 + + res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()])) + expected = np.array([3], dtype=np.int64) + tm.assert_numpy_array_equal(res, expected) + + arr = np.array([(10 * scalar).to_timedelta64(), 
np.timedelta64("NaT")]) + res = td.__rfloordiv__(arr) + expected = np.array([10, np.nan]) + tm.assert_numpy_array_equal(res, expected) + + def test_td_rfloordiv_intarray(self): + # deprecated GH#19761, enforced GH#29797 + ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10**9 + + msg = "Invalid dtype" + with pytest.raises(TypeError, match=msg): + ints // Timedelta(1, unit="s") + + def test_td_rfloordiv_numeric_series(self): + # GH#18846 + td = Timedelta(hours=3, minutes=3) + ser = pd.Series([1], dtype=np.int64) + res = td.__rfloordiv__(ser) + assert res is NotImplemented + + msg = "Invalid dtype" + with pytest.raises(TypeError, match=msg): + # Deprecated GH#19761, enforced GH#29797 + ser // td + + # ---------------------------------------------------------------- + # Timedelta.__mod__, __rmod__ + + def test_mod_timedeltalike(self): + # GH#19365 + td = Timedelta(hours=37) + + # Timedelta-like others + result = td % Timedelta(hours=6) + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=1) + + result = td % timedelta(minutes=60) + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + result = td % NaT + assert result is NaT + + def test_mod_timedelta64_nat(self): + # GH#19365 + td = Timedelta(hours=37) + + result = td % np.timedelta64("NaT", "ns") + assert result is NaT + + def test_mod_timedelta64(self): + # GH#19365 + td = Timedelta(hours=37) + + result = td % np.timedelta64(2, "h") + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=1) + + def test_mod_offset(self): + # GH#19365 + td = Timedelta(hours=37) + + result = td % offsets.Hour(5) + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=2) + + def test_mod_numeric(self): + # GH#19365 + td = Timedelta(hours=37) + + # Numeric Others + result = td % 2 + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + result = td % 1e12 + assert isinstance(result, Timedelta) + assert result == 
Timedelta(minutes=3, seconds=20) + + result = td % int(1e12) + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=3, seconds=20) + + def test_mod_invalid(self): + # GH#19365 + td = Timedelta(hours=37) + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + td % Timestamp("2018-01-22") + + with pytest.raises(TypeError, match=msg): + td % [] + + def test_rmod_pytimedelta(self): + # GH#19365 + td = Timedelta(minutes=3) + + result = timedelta(minutes=4) % td + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=1) + + def test_rmod_timedelta64(self): + # GH#19365 + td = Timedelta(minutes=3) + result = np.timedelta64(5, "m") % td + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=2) + + def test_rmod_invalid(self): + # GH#19365 + td = Timedelta(minutes=3) + + msg = "unsupported operand" + with pytest.raises(TypeError, match=msg): + Timestamp("2018-01-22") % td + + with pytest.raises(TypeError, match=msg): + 15 % td + + with pytest.raises(TypeError, match=msg): + 16.0 % td + + msg = "Invalid dtype int" + with pytest.raises(TypeError, match=msg): + np.array([22, 24]) % td + + # ---------------------------------------------------------------- + # Timedelta.__divmod__, __rdivmod__ + + def test_divmod_numeric(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, 53 * 3600 * 1e9) + assert result[0] == Timedelta(1, unit="ns") + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=1) + + assert result + result = divmod(td, np.nan) + assert result[0] is NaT + assert result[1] is NaT + + def test_divmod(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, timedelta(days=1)) + assert result[0] == 2 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=6) + + result = divmod(td, 54) + assert result[0] == Timedelta(hours=1) + assert isinstance(result[1], Timedelta) + assert result[1] == 
Timedelta(0) + + result = divmod(td, NaT) + assert np.isnan(result[0]) + assert result[1] is NaT + + def test_divmod_offset(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, offsets.Hour(-4)) + assert result[0] == -14 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=-2) + + def test_divmod_invalid(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + msg = r"unsupported operand type\(s\) for //: 'Timedelta' and 'Timestamp'" + with pytest.raises(TypeError, match=msg): + divmod(td, Timestamp("2018-01-22")) + + def test_rdivmod_pytimedelta(self): + # GH#19365 + result = divmod(timedelta(days=2, hours=6), Timedelta(days=1)) + assert result[0] == 2 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=6) + + def test_rdivmod_offset(self): + result = divmod(offsets.Hour(54), Timedelta(hours=-4)) + assert result[0] == -14 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=-2) + + def test_rdivmod_invalid(self): + # GH#19365 + td = Timedelta(minutes=3) + msg = "unsupported operand type" + + with pytest.raises(TypeError, match=msg): + divmod(Timestamp("2018-01-22"), td) + + with pytest.raises(TypeError, match=msg): + divmod(15, td) + + with pytest.raises(TypeError, match=msg): + divmod(16.0, td) + + msg = "Invalid dtype int" + with pytest.raises(TypeError, match=msg): + divmod(np.array([22, 24]), td) + + # ---------------------------------------------------------------- + + @pytest.mark.parametrize( + "op", [operator.mul, ops.rmul, operator.truediv, ops.rdiv, ops.rsub] + ) + @pytest.mark.parametrize( + "arr", + [ + np.array([Timestamp("20130101 9:01"), Timestamp("20121230 9:02")]), + np.array([Timestamp("2021-11-09 09:54:00"), Timedelta("1D")]), + ], + ) + def test_td_op_timedelta_timedeltalike_array(self, op, arr): + msg = "unsupported operand type|cannot use operands with types" + with pytest.raises(TypeError, match=msg): + op(arr, Timedelta("1D")) + + 
+class TestTimedeltaComparison: + @pytest.mark.skip_ubsan + def test_compare_pytimedelta_bounds(self): + # GH#49021 don't overflow on comparison with very large pytimedeltas + + for unit in ["ns", "us"]: + tdmax = Timedelta.max.as_unit(unit).max + tdmin = Timedelta.min.as_unit(unit).min + + assert tdmax < timedelta.max + assert tdmax <= timedelta.max + assert not tdmax > timedelta.max + assert not tdmax >= timedelta.max + assert tdmax != timedelta.max + assert not tdmax == timedelta.max + + assert tdmin > timedelta.min + assert tdmin >= timedelta.min + assert not tdmin < timedelta.min + assert not tdmin <= timedelta.min + assert tdmin != timedelta.min + assert not tdmin == timedelta.min + + # But the "ms" and "s"-reso bounds extend pass pytimedelta + for unit in ["ms", "s"]: + tdmax = Timedelta.max.as_unit(unit).max + tdmin = Timedelta.min.as_unit(unit).min + + assert tdmax > timedelta.max + assert tdmax >= timedelta.max + assert not tdmax < timedelta.max + assert not tdmax <= timedelta.max + assert tdmax != timedelta.max + assert not tdmax == timedelta.max + + assert tdmin < timedelta.min + assert tdmin <= timedelta.min + assert not tdmin > timedelta.min + assert not tdmin >= timedelta.min + assert tdmin != timedelta.min + assert not tdmin == timedelta.min + + def test_compare_pytimedelta_bounds2(self): + # a pytimedelta outside the microsecond bounds + pytd = timedelta(days=999999999, seconds=86399) + # NB: np.timedelta64(td, "s"") incorrectly overflows + td64 = np.timedelta64(pytd.days, "D") + np.timedelta64(pytd.seconds, "s") + td = Timedelta(td64) + assert td.days == pytd.days + assert td.seconds == pytd.seconds + + assert td == pytd + assert not td != pytd + assert not td < pytd + assert not td > pytd + assert td <= pytd + assert td >= pytd + + td2 = td - Timedelta(seconds=1).as_unit("s") + assert td2 != pytd + assert not td2 == pytd + assert td2 < pytd + assert td2 <= pytd + assert not td2 > pytd + assert not td2 >= pytd + + def test_compare_tick(self, 
tick_classes): + cls = tick_classes + + off = cls(4) + td = off._as_pd_timedelta + assert isinstance(td, Timedelta) + + assert td == off + assert not td != off + assert td <= off + assert td >= off + assert not td < off + assert not td > off + + assert not td == 2 * off + assert td != 2 * off + assert td <= 2 * off + assert td < 2 * off + assert not td >= 2 * off + assert not td > 2 * off + + def test_comparison_object_array(self): + # analogous to GH#15183 + td = Timedelta("2 days") + other = Timedelta("3 hours") + + arr = np.array([other, td], dtype=object) + res = arr == td + expected = np.array([False, True], dtype=bool) + assert (res == expected).all() + + # 2D case + arr = np.array([[other, td], [td, other]], dtype=object) + res = arr != td + expected = np.array([[True, False], [False, True]], dtype=bool) + assert res.shape == expected.shape + assert (res == expected).all() + + def test_compare_timedelta_ndarray(self): + # GH#11835 + periods = [Timedelta("0 days 01:00:00"), Timedelta("0 days 01:00:00")] + arr = np.array(periods) + result = arr[0] > arr + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_td64_ndarray(self): + # GG#33441 + arr = np.arange(5).astype("timedelta64[ns]") + td = Timedelta(arr[1]) + + expected = np.array([False, True, False, False, False], dtype=bool) + + result = td == arr + tm.assert_numpy_array_equal(result, expected) + + result = arr == td + tm.assert_numpy_array_equal(result, expected) + + result = td != arr + tm.assert_numpy_array_equal(result, ~expected) + + result = arr != td + tm.assert_numpy_array_equal(result, ~expected) + + def test_compare_custom_object(self): + """ + Make sure non supported operations on Timedelta returns NonImplemented + and yields to other operand (GH#20829). 
+ """ + + class CustomClass: + def __init__(self, cmp_result=None) -> None: + self.cmp_result = cmp_result + + def generic_result(self): + if self.cmp_result is None: + return NotImplemented + else: + return self.cmp_result + + def __eq__(self, other): + return self.generic_result() + + def __gt__(self, other): + return self.generic_result() + + t = Timedelta("1s") + + assert t != "string" + assert t != 1 + assert t != CustomClass() + assert t != CustomClass(cmp_result=False) + + assert t < CustomClass(cmp_result=True) + assert not t < CustomClass(cmp_result=False) + + assert t == CustomClass(cmp_result=True) + + @pytest.mark.parametrize("val", ["string", 1]) + def test_compare_unknown_type(self, val): + # GH#20829 + t = Timedelta("1s") + msg = "not supported between instances of 'Timedelta' and '(int|str)'" + with pytest.raises(TypeError, match=msg): + t >= val + with pytest.raises(TypeError, match=msg): + t > val + with pytest.raises(TypeError, match=msg): + t <= val + with pytest.raises(TypeError, match=msg): + t < val + + +def test_ops_notimplemented(): + class Other: + pass + + other = Other() + + td = Timedelta("1 day") + assert td.__add__(other) is NotImplemented + assert td.__sub__(other) is NotImplemented + assert td.__truediv__(other) is NotImplemented + assert td.__mul__(other) is NotImplemented + assert td.__floordiv__(other) is NotImplemented + + +def test_ops_error_str(): + # GH#13624 + td = Timedelta("1 day") + + for left, right in [(td, "a"), ("a", td)]: + msg = "|".join( + [ + "unsupported operand type", + r'can only concatenate str \(not "Timedelta"\) to str', + "must be str, not Timedelta", + ] + ) + with pytest.raises(TypeError, match=msg): + left + right + + msg = "not supported between instances of" + with pytest.raises(TypeError, match=msg): + left > right + + assert not left == right # pylint: disable=unneeded-not + assert left != right diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_constructors.py 
b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..4663f8cb719616cadd6e946c987e76bc3d979b01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_constructors.py @@ -0,0 +1,698 @@ +from datetime import timedelta +from itertools import product + +import numpy as np +import pytest + +from pandas._libs.tslibs import OutOfBoundsTimedelta +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit + +from pandas import ( + Index, + NaT, + Timedelta, + TimedeltaIndex, + offsets, + to_timedelta, +) +import pandas._testing as tm + + +class TestTimedeltaConstructorUnitKeyword: + @pytest.mark.parametrize("unit", ["Y", "y", "M"]) + def test_unit_m_y_raises(self, unit): + msg = "Units 'M', 'Y', and 'y' are no longer supported" + + with pytest.raises(ValueError, match=msg): + Timedelta(10, unit) + + with pytest.raises(ValueError, match=msg): + to_timedelta(10, unit) + + with pytest.raises(ValueError, match=msg): + to_timedelta([1, 2], unit) + + @pytest.mark.parametrize( + "unit,unit_depr", + [ + ("h", "H"), + ("min", "T"), + ("s", "S"), + ("ms", "L"), + ("ns", "N"), + ("us", "U"), + ], + ) + def test_units_H_T_S_L_N_U_deprecated(self, unit, unit_depr): + # GH#52536 + msg = f"'{unit_depr}' is deprecated and will be removed in a future version." 
+ + expected = Timedelta(1, unit=unit) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = Timedelta(1, unit=unit_depr) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "unit, np_unit", + [(value, "W") for value in ["W", "w"]] + + [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]] + + [ + (value, "m") + for value in [ + "m", + "minute", + "min", + "minutes", + "Minute", + "Min", + "Minutes", + ] + ] + + [ + (value, "s") + for value in [ + "s", + "seconds", + "sec", + "second", + "Seconds", + "Sec", + "Second", + ] + ] + + [ + (value, "ms") + for value in [ + "ms", + "milliseconds", + "millisecond", + "milli", + "millis", + "MS", + "Milliseconds", + "Millisecond", + "Milli", + "Millis", + ] + ] + + [ + (value, "us") + for value in [ + "us", + "microseconds", + "microsecond", + "micro", + "micros", + "u", + "US", + "Microseconds", + "Microsecond", + "Micro", + "Micros", + "U", + ] + ] + + [ + (value, "ns") + for value in [ + "ns", + "nanoseconds", + "nanosecond", + "nano", + "nanos", + "n", + "NS", + "Nanoseconds", + "Nanosecond", + "Nano", + "Nanos", + "N", + ] + ], + ) + @pytest.mark.parametrize("wrapper", [np.array, list, Index]) + def test_unit_parser(self, unit, np_unit, wrapper): + # validate all units, GH 6855, GH 21762 + # array-likes + expected = TimedeltaIndex( + [np.timedelta64(i, np_unit) for i in np.arange(5).tolist()], + dtype="m8[ns]", + ) + # TODO(2.0): the desired output dtype may have non-nano resolution + msg = f"'{unit}' is deprecated and will be removed in a future version." 
def test_construct_from_kwargs_overflow():
    """Keyword-based construction that overflows must raise, not wrap.

    GH#55503: ``Timedelta(days=...)`` / ``Timedelta(minutes=...)`` with
    values whose second-equivalent exceeds the representable range raise
    OutOfBoundsTimedelta with a message reporting the computed components.
    """
    cases = [
        (
            {"days": 10**6},
            "seconds=86400000000000000000, milliseconds=0, microseconds=0, nanoseconds=0",
        ),
        (
            {"minutes": 10**9},
            "seconds=60000000000000000000, milliseconds=0, microseconds=0, nanoseconds=0",
        ),
    ]
    for kwargs, msg in cases:
        with pytest.raises(OutOfBoundsTimedelta, match=msg):
            Timedelta(**kwargs)
def test_from_pytimedelta_us_reso():
    """Timedelta built from a datetime.timedelta adopts microsecond resolution."""
    # stdlib timedelta stores microseconds internally, so the pandas
    # scalar should round-trip exactly and carry the "us" reso code.
    source = timedelta(days=4, minutes=3)
    converted = Timedelta(source)
    assert converted.to_pytimedelta() == source
    assert converted._creso == NpyDatetimeUnit.NPY_FR_us.value
== NpyDatetimeUnit.NPY_FR_s.value + + +def test_construction(): + expected = np.timedelta64(10, "D").astype("m8[ns]").view("i8") + assert Timedelta(10, unit="d")._value == expected + assert Timedelta(10.0, unit="d")._value == expected + assert Timedelta("10 days")._value == expected + assert Timedelta(days=10)._value == expected + assert Timedelta(days=10.0)._value == expected + + expected += np.timedelta64(10, "s").astype("m8[ns]").view("i8") + assert Timedelta("10 days 00:00:10")._value == expected + assert Timedelta(days=10, seconds=10)._value == expected + assert Timedelta(days=10, milliseconds=10 * 1000)._value == expected + assert Timedelta(days=10, microseconds=10 * 1000 * 1000)._value == expected + + # rounding cases + assert Timedelta(82739999850000)._value == 82739999850000 + assert "0 days 22:58:59.999850" in str(Timedelta(82739999850000)) + assert Timedelta(123072001000000)._value == 123072001000000 + assert "1 days 10:11:12.001" in str(Timedelta(123072001000000)) + + # string conversion with/without leading zero + # GH#9570 + assert Timedelta("0:00:00") == timedelta(hours=0) + assert Timedelta("00:00:00") == timedelta(hours=0) + assert Timedelta("-1:00:00") == -timedelta(hours=1) + assert Timedelta("-01:00:00") == -timedelta(hours=1) + + # more strings & abbrevs + # GH#8190 + assert Timedelta("1 h") == timedelta(hours=1) + assert Timedelta("1 hour") == timedelta(hours=1) + assert Timedelta("1 hr") == timedelta(hours=1) + assert Timedelta("1 hours") == timedelta(hours=1) + assert Timedelta("-1 hours") == -timedelta(hours=1) + assert Timedelta("1 m") == timedelta(minutes=1) + assert Timedelta("1.5 m") == timedelta(seconds=90) + assert Timedelta("1 minute") == timedelta(minutes=1) + assert Timedelta("1 minutes") == timedelta(minutes=1) + assert Timedelta("1 s") == timedelta(seconds=1) + assert Timedelta("1 second") == timedelta(seconds=1) + assert Timedelta("1 seconds") == timedelta(seconds=1) + assert Timedelta("1 ms") == timedelta(milliseconds=1) + 
assert Timedelta("1 milli") == timedelta(milliseconds=1) + assert Timedelta("1 millisecond") == timedelta(milliseconds=1) + assert Timedelta("1 us") == timedelta(microseconds=1) + assert Timedelta("1 µs") == timedelta(microseconds=1) + assert Timedelta("1 micros") == timedelta(microseconds=1) + assert Timedelta("1 microsecond") == timedelta(microseconds=1) + assert Timedelta("1.5 microsecond") == Timedelta("00:00:00.000001500") + assert Timedelta("1 ns") == Timedelta("00:00:00.000000001") + assert Timedelta("1 nano") == Timedelta("00:00:00.000000001") + assert Timedelta("1 nanosecond") == Timedelta("00:00:00.000000001") + + # combos + assert Timedelta("10 days 1 hour") == timedelta(days=10, hours=1) + assert Timedelta("10 days 1 h") == timedelta(days=10, hours=1) + assert Timedelta("10 days 1 h 1m 1s") == timedelta( + days=10, hours=1, minutes=1, seconds=1 + ) + assert Timedelta("-10 days 1 h 1m 1s") == -timedelta( + days=10, hours=1, minutes=1, seconds=1 + ) + assert Timedelta("-10 days 1 h 1m 1s") == -timedelta( + days=10, hours=1, minutes=1, seconds=1 + ) + assert Timedelta("-10 days 1 h 1m 1s 3us") == -timedelta( + days=10, hours=1, minutes=1, seconds=1, microseconds=3 + ) + assert Timedelta("-10 days 1 h 1.5m 1s 3us") == -timedelta( + days=10, hours=1, minutes=1, seconds=31, microseconds=3 + ) + + # Currently invalid as it has a - on the hh:mm:dd part + # (only allowed on the days) + msg = "only leading negative signs are allowed" + with pytest.raises(ValueError, match=msg): + Timedelta("-10 days -1 h 1.5m 1s 3us") + + # only leading neg signs are allowed + with pytest.raises(ValueError, match=msg): + Timedelta("10 days -1 h 1.5m 1s 3us") + + # no units specified + msg = "no units specified" + with pytest.raises(ValueError, match=msg): + Timedelta("3.1415") + + # invalid construction + msg = "cannot construct a Timedelta" + with pytest.raises(ValueError, match=msg): + Timedelta() + + msg = "unit abbreviation w/o a number" + with pytest.raises(ValueError, 
match=msg): + Timedelta("foo") + + msg = ( + "cannot construct a Timedelta from " + "the passed arguments, allowed keywords are " + ) + with pytest.raises(ValueError, match=msg): + Timedelta(day=10) + + # floats + expected = np.timedelta64(10, "s").astype("m8[ns]").view("i8") + np.timedelta64( + 500, "ms" + ).astype("m8[ns]").view("i8") + assert Timedelta(10.5, unit="s")._value == expected + + # offset + assert to_timedelta(offsets.Hour(2)) == Timedelta(hours=2) + assert Timedelta(offsets.Hour(2)) == Timedelta(hours=2) + assert Timedelta(offsets.Second(2)) == Timedelta(seconds=2) + + # GH#11995: unicode + expected = Timedelta("1h") + result = Timedelta("1h") + assert result == expected + assert to_timedelta(offsets.Hour(2)) == Timedelta("0 days, 02:00:00") + + msg = "unit abbreviation w/o a number" + with pytest.raises(ValueError, match=msg): + Timedelta("foo bar") + + +@pytest.mark.parametrize( + "item", + list( + { + "days": "D", + "seconds": "s", + "microseconds": "us", + "milliseconds": "ms", + "minutes": "m", + "hours": "h", + "weeks": "W", + }.items() + ), +) +@pytest.mark.parametrize( + "npdtype", [np.int64, np.int32, np.int16, np.float64, np.float32, np.float16] +) +def test_td_construction_with_np_dtypes(npdtype, item): + # GH#8757: test construction with np dtypes + pykwarg, npkwarg = item + expected = np.timedelta64(1, npkwarg).astype("m8[ns]").view("i8") + assert Timedelta(**{pykwarg: npdtype(1)})._value == expected + + +@pytest.mark.parametrize( + "val", + [ + "1s", + "-1s", + "1us", + "-1us", + "1 day", + "-1 day", + "-23:59:59.999999", + "-1 days +23:59:59.999999", + "-1ns", + "1ns", + "-23:59:59.999999999", + ], +) +def test_td_from_repr_roundtrip(val): + # round-trip both for string and value + td = Timedelta(val) + assert Timedelta(td._value) == td + + assert Timedelta(str(td)) == td + assert Timedelta(td._repr_base(format="all")) == td + assert Timedelta(td._repr_base()) == td + + +def test_overflow_on_construction(): + # GH#3374 + value = 
Timedelta("1day")._value * 20169940 + msg = "Cannot cast 1742682816000000000000 from ns to 'ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(value) + + # xref GH#17637 + msg = "Cannot cast 139993 from D to 'ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(7 * 19999, unit="D") + + # used to overflow before non-ns support + td = Timedelta(timedelta(days=13 * 19999)) + assert td._creso == NpyDatetimeUnit.NPY_FR_us.value + assert td.days == 13 * 19999 + + +@pytest.mark.parametrize( + "val, unit", + [ + (15251, "W"), # 1 + (106752, "D"), # change from previous: + (2562048, "h"), # 0 hours + (153722868, "m"), # 13 minutes + (9223372037, "s"), # 44 seconds + ], +) +def test_construction_out_of_bounds_td64ns(val, unit): + # TODO: parametrize over units just above/below the implementation bounds + # once GH#38964 is resolved + + # Timedelta.max is just under 106752 days + td64 = np.timedelta64(val, unit) + assert td64.astype("m8[ns]").view("i8") < 0 # i.e. naive astype will be wrong + + td = Timedelta(td64) + if unit != "M": + # with unit="M" the conversion to "s" is poorly defined + # (and numpy issues DeprecationWarning) + assert td.asm8 == td64 + assert td.asm8.dtype == "m8[s]" + msg = r"Cannot cast 1067\d\d days .* to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td.as_unit("ns") + + # But just back in bounds and we are OK + assert Timedelta(td64 - 1) == td64 - 1 + + td64 *= -1 + assert td64.astype("m8[ns]").view("i8") > 0 # i.e. 
naive astype will be wrong + + td2 = Timedelta(td64) + msg = r"Cannot cast -1067\d\d days .* to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td2.as_unit("ns") + + # But just back in bounds and we are OK + assert Timedelta(td64 + 1) == td64 + 1 + + +@pytest.mark.parametrize( + "val, unit", + [ + (15251 * 10**9, "W"), + (106752 * 10**9, "D"), + (2562048 * 10**9, "h"), + (153722868 * 10**9, "m"), + ], +) +def test_construction_out_of_bounds_td64s(val, unit): + td64 = np.timedelta64(val, unit) + with pytest.raises(OutOfBoundsTimedelta, match=str(td64)): + Timedelta(td64) + + # But just back in bounds and we are OK + assert Timedelta(td64 - 10**9) == td64 - 10**9 + + +@pytest.mark.parametrize( + "fmt,exp", + [ + ( + "P6DT0H50M3.010010012S", + Timedelta( + days=6, + minutes=50, + seconds=3, + milliseconds=10, + microseconds=10, + nanoseconds=12, + ), + ), + ( + "P-6DT0H50M3.010010012S", + Timedelta( + days=-6, + minutes=50, + seconds=3, + milliseconds=10, + microseconds=10, + nanoseconds=12, + ), + ), + ("P4DT12H30M5S", Timedelta(days=4, hours=12, minutes=30, seconds=5)), + ("P0DT0H0M0.000000123S", Timedelta(nanoseconds=123)), + ("P0DT0H0M0.00001S", Timedelta(microseconds=10)), + ("P0DT0H0M0.001S", Timedelta(milliseconds=1)), + ("P0DT0H1M0S", Timedelta(minutes=1)), + ("P1DT25H61M61S", Timedelta(days=1, hours=25, minutes=61, seconds=61)), + ("PT1S", Timedelta(seconds=1)), + ("PT0S", Timedelta(seconds=0)), + ("P1WT0S", Timedelta(days=7, seconds=0)), + ("P1D", Timedelta(days=1)), + ("P1DT1H", Timedelta(days=1, hours=1)), + ("P1W", Timedelta(days=7)), + ("PT300S", Timedelta(seconds=300)), + ("P1DT0H0M00000000000S", Timedelta(days=1)), + ("PT-6H3M", Timedelta(hours=-6, minutes=3)), + ("-PT6H3M", Timedelta(hours=-6, minutes=-3)), + ("-PT-6H+3M", Timedelta(hours=6, minutes=-3)), + ], +) +def test_iso_constructor(fmt, exp): + assert Timedelta(fmt) == exp + + +@pytest.mark.parametrize( + "fmt", + [ + "PPPPPPPPPPPP", + "PDTHMS", + 
"P0DT999H999M999S", + "P1DT0H0M0.0000000000000S", + "P1DT0H0M0.S", + "P", + "-P", + ], +) +def test_iso_constructor_raises(fmt): + msg = f"Invalid ISO 8601 Duration format - {fmt}" + with pytest.raises(ValueError, match=msg): + Timedelta(fmt) + + +@pytest.mark.parametrize( + "constructed_td, conversion", + [ + (Timedelta(nanoseconds=100), "100ns"), + ( + Timedelta( + days=1, + hours=1, + minutes=1, + weeks=1, + seconds=1, + milliseconds=1, + microseconds=1, + nanoseconds=1, + ), + 694861001001001, + ), + (Timedelta(microseconds=1) + Timedelta(nanoseconds=1), "1us1ns"), + (Timedelta(microseconds=1) - Timedelta(nanoseconds=1), "999ns"), + (Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), "990ns"), + ], +) +def test_td_constructor_on_nanoseconds(constructed_td, conversion): + # GH#9273 + assert constructed_td == Timedelta(conversion) + + +def test_td_constructor_value_error(): + msg = "Invalid type . Must be int or float." + with pytest.raises(TypeError, match=msg): + Timedelta(nanoseconds="abc") + + +def test_timedelta_constructor_identity(): + # Test for #30543 + expected = Timedelta(np.timedelta64(1, "s")) + result = Timedelta(expected) + assert result is expected + + +def test_timedelta_pass_td_and_kwargs_raises(): + # don't silently ignore the kwargs GH#48898 + td = Timedelta(days=1) + msg = ( + "Cannot pass both a Timedelta input and timedelta keyword arguments, " + r"got \['days'\]" + ) + with pytest.raises(ValueError, match=msg): + Timedelta(td, days=2) + + +@pytest.mark.parametrize( + "constructor, value, unit, expectation", + [ + (Timedelta, "10s", "ms", (ValueError, "unit must not be specified")), + (to_timedelta, "10s", "ms", (ValueError, "unit must not be specified")), + (to_timedelta, ["1", 2, 3], "s", (ValueError, "unit must not be specified")), + ], +) +def test_string_with_unit(constructor, value, unit, expectation): + exp, match = expectation + with pytest.raises(exp, match=match): + _ = constructor(value, unit=unit) + + 
def test_timedelta_new_npnat():
    """Constructing from a np.timedelta64 NaT yields the pandas NaT singleton."""
    # GH#48898: identity (is), not just equality, with pandas.NaT
    np_nat = np.timedelta64("NaT", "h")
    result = Timedelta(np_nat)
    assert result is NaT
" + r"Use `.asm8.view\('i8'\)` to cast represent Timedelta in its " + r"own unit \(here, s\).$" + ) + td = Timedelta(1_000, "D").as_unit("s") * 1_000 + with pytest.raises(OverflowError, match=msg): + td.value + # check that the suggested workaround actually works + result = td.asm8.view("i8") + assert result == 86400000000 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_formats.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..e1b0076d5b7b99f4baba3f18ccf84d8054a31087 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_formats.py @@ -0,0 +1,109 @@ +import pytest + +from pandas import Timedelta + + +@pytest.mark.parametrize( + "td, expected_repr", + [ + (Timedelta(10, unit="d"), "Timedelta('10 days 00:00:00')"), + (Timedelta(10, unit="s"), "Timedelta('0 days 00:00:10')"), + (Timedelta(10, unit="ms"), "Timedelta('0 days 00:00:00.010000')"), + (Timedelta(-10, unit="ms"), "Timedelta('-1 days +23:59:59.990000')"), + ], +) +def test_repr(td, expected_repr): + assert repr(td) == expected_repr + + +@pytest.mark.parametrize( + "td, expected_iso", + [ + ( + Timedelta( + days=6, + minutes=50, + seconds=3, + milliseconds=10, + microseconds=10, + nanoseconds=12, + ), + "P6DT0H50M3.010010012S", + ), + (Timedelta(days=4, hours=12, minutes=30, seconds=5), "P4DT12H30M5S"), + (Timedelta(nanoseconds=123), "P0DT0H0M0.000000123S"), + # trim nano + (Timedelta(microseconds=10), "P0DT0H0M0.00001S"), + # trim micro + (Timedelta(milliseconds=1), "P0DT0H0M0.001S"), + # don't strip every 0 + (Timedelta(minutes=1), "P0DT0H1M0S"), + ], +) +def test_isoformat(td, expected_iso): + assert td.isoformat() == expected_iso + + +class TestReprBase: + def test_none(self): + delta_1d = Timedelta(1, unit="D") + delta_0d = Timedelta(0, unit="D") + delta_1s = Timedelta(1, unit="s") + delta_500ms = Timedelta(500, unit="ms") + + drepr = 
lambda x: x._repr_base() + assert drepr(delta_1d) == "1 days" + assert drepr(-delta_1d) == "-1 days" + assert drepr(delta_0d) == "0 days" + assert drepr(delta_1s) == "0 days 00:00:01" + assert drepr(delta_500ms) == "0 days 00:00:00.500000" + assert drepr(delta_1d + delta_1s) == "1 days 00:00:01" + assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01" + assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000" + assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000" + + def test_sub_day(self): + delta_1d = Timedelta(1, unit="D") + delta_0d = Timedelta(0, unit="D") + delta_1s = Timedelta(1, unit="s") + delta_500ms = Timedelta(500, unit="ms") + + drepr = lambda x: x._repr_base(format="sub_day") + assert drepr(delta_1d) == "1 days" + assert drepr(-delta_1d) == "-1 days" + assert drepr(delta_0d) == "00:00:00" + assert drepr(delta_1s) == "00:00:01" + assert drepr(delta_500ms) == "00:00:00.500000" + assert drepr(delta_1d + delta_1s) == "1 days 00:00:01" + assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01" + assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000" + assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000" + + def test_long(self): + delta_1d = Timedelta(1, unit="D") + delta_0d = Timedelta(0, unit="D") + delta_1s = Timedelta(1, unit="s") + delta_500ms = Timedelta(500, unit="ms") + + drepr = lambda x: x._repr_base(format="long") + assert drepr(delta_1d) == "1 days 00:00:00" + assert drepr(-delta_1d) == "-1 days +00:00:00" + assert drepr(delta_0d) == "0 days 00:00:00" + assert drepr(delta_1s) == "0 days 00:00:01" + assert drepr(delta_500ms) == "0 days 00:00:00.500000" + assert drepr(delta_1d + delta_1s) == "1 days 00:00:01" + assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01" + assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000" + assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000" + + def test_all(self): + delta_1d = Timedelta(1, unit="D") + delta_0d = Timedelta(0, unit="D") 
+ delta_1ns = Timedelta(1, unit="ns") + + drepr = lambda x: x._repr_base(format="all") + assert drepr(delta_1d) == "1 days 00:00:00.000000000" + assert drepr(-delta_1d) == "-1 days +00:00:00.000000000" + assert drepr(delta_0d) == "0 days 00:00:00.000000000" + assert drepr(delta_1ns) == "0 days 00:00:00.000000001" + assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001" diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_timedelta.py b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_timedelta.py new file mode 100644 index 0000000000000000000000000000000000000000..d4398f66e6f890a26dc90d540766d71d7857ad67 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_timedelta.py @@ -0,0 +1,666 @@ +""" test the scalar Timedelta """ +from datetime import timedelta +import sys + +from hypothesis import ( + given, + strategies as st, +) +import numpy as np +import pytest + +from pandas._libs import lib +from pandas._libs.tslibs import ( + NaT, + iNaT, +) +from pandas._libs.tslibs.dtypes import NpyDatetimeUnit +from pandas.errors import OutOfBoundsTimedelta + +from pandas import ( + Timedelta, + to_timedelta, +) +import pandas._testing as tm + + +class TestNonNano: + @pytest.fixture(params=["s", "ms", "us"]) + def unit_str(self, request): + return request.param + + @pytest.fixture + def unit(self, unit_str): + # 7, 8, 9 correspond to second, millisecond, and microsecond, respectively + attr = f"NPY_FR_{unit_str}" + return getattr(NpyDatetimeUnit, attr).value + + @pytest.fixture + def val(self, unit): + # microsecond that would be just out of bounds for nano + us = 9223372800000000 + if unit == NpyDatetimeUnit.NPY_FR_us.value: + value = us + elif unit == NpyDatetimeUnit.NPY_FR_ms.value: + value = us // 1000 + else: + value = us // 1_000_000 + return value + + @pytest.fixture + def td(self, unit, val): + return Timedelta._from_value_and_reso(val, unit) + + def 
test_from_value_and_reso(self, unit, val): + # Just checking that the fixture is giving us what we asked for + td = Timedelta._from_value_and_reso(val, unit) + assert td._value == val + assert td._creso == unit + assert td.days == 106752 + + def test_unary_non_nano(self, td, unit): + assert abs(td)._creso == unit + assert (-td)._creso == unit + assert (+td)._creso == unit + + def test_sub_preserves_reso(self, td, unit): + res = td - td + expected = Timedelta._from_value_and_reso(0, unit) + assert res == expected + assert res._creso == unit + + def test_mul_preserves_reso(self, td, unit): + # The td fixture should always be far from the implementation + # bound, so doubling does not risk overflow. + res = td * 2 + assert res._value == td._value * 2 + assert res._creso == unit + + def test_cmp_cross_reso(self, td): + # numpy gets this wrong because of silent overflow + other = Timedelta(days=106751, unit="ns") + assert other < td + assert td > other + assert not other == td + assert td != other + + def test_to_pytimedelta(self, td): + res = td.to_pytimedelta() + expected = timedelta(days=106752) + assert type(res) is timedelta + assert res == expected + + def test_to_timedelta64(self, td, unit): + for res in [td.to_timedelta64(), td.to_numpy(), td.asm8]: + assert isinstance(res, np.timedelta64) + assert res.view("i8") == td._value + if unit == NpyDatetimeUnit.NPY_FR_s.value: + assert res.dtype == "m8[s]" + elif unit == NpyDatetimeUnit.NPY_FR_ms.value: + assert res.dtype == "m8[ms]" + elif unit == NpyDatetimeUnit.NPY_FR_us.value: + assert res.dtype == "m8[us]" + + def test_truediv_timedeltalike(self, td): + assert td / td == 1 + assert (2.5 * td) / td == 2.5 + + other = Timedelta(td._value) + msg = "Cannot cast 106752 days 00:00:00 to unit='ns' without overflow." 
+ with pytest.raises(OutOfBoundsTimedelta, match=msg): + td / other + + # Timedelta(other.to_pytimedelta()) has microsecond resolution, + # so the division doesn't require casting all the way to nanos, + # so succeeds + res = other.to_pytimedelta() / td + expected = other.to_pytimedelta() / td.to_pytimedelta() + assert res == expected + + # if there's no overflow, we cast to the higher reso + left = Timedelta._from_value_and_reso(50, NpyDatetimeUnit.NPY_FR_us.value) + right = Timedelta._from_value_and_reso(50, NpyDatetimeUnit.NPY_FR_ms.value) + result = left / right + assert result == 0.001 + + result = right / left + assert result == 1000 + + def test_truediv_numeric(self, td): + assert td / np.nan is NaT + + res = td / 2 + assert res._value == td._value / 2 + assert res._creso == td._creso + + res = td / 2.0 + assert res._value == td._value / 2 + assert res._creso == td._creso + + def test_floordiv_timedeltalike(self, td): + assert td // td == 1 + assert (2.5 * td) // td == 2 + + other = Timedelta(td._value) + msg = "Cannot cast 106752 days 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + td // other + + # Timedelta(other.to_pytimedelta()) has microsecond resolution, + # so the floordiv doesn't require casting all the way to nanos, + # so succeeds + res = other.to_pytimedelta() // td + assert res == 0 + + # if there's no overflow, we cast to the higher reso + left = Timedelta._from_value_and_reso(50050, NpyDatetimeUnit.NPY_FR_us.value) + right = Timedelta._from_value_and_reso(50, NpyDatetimeUnit.NPY_FR_ms.value) + result = left // right + assert result == 1 + result = right // left + assert result == 0 + + def test_floordiv_numeric(self, td): + assert td // np.nan is NaT + + res = td // 2 + assert res._value == td._value // 2 + assert res._creso == td._creso + + res = td // 2.0 + assert res._value == td._value // 2 + assert res._creso == td._creso + + assert td // np.array(np.nan) is NaT + + res = td // np.array(2) 
def test_timedelta_class_min_max_resolution():
    """min/max/resolution accessed on the class itself default to nanoseconds."""
    ns_reso = NpyDatetimeUnit.NPY_FR_ns.value
    int64_max = np.iinfo(np.int64).max

    # smallest representable value: one tick above the NaT sentinel
    assert Timedelta.min == Timedelta(NaT._value + 1)
    assert Timedelta.min._creso == ns_reso

    # largest representable value: int64 max in nanoseconds
    assert Timedelta.max == Timedelta(int64_max)
    assert Timedelta.max._creso == ns_reso

    # class-level resolution is a single nanosecond
    assert Timedelta.resolution == Timedelta(1)
    assert Timedelta.resolution._creso == ns_reso
+ + assert td == np.timedelta64(td._value, "ns") + td64 = td.to_timedelta64() + + assert td64 == np.timedelta64(td._value, "ns") + assert td == td64 + + assert isinstance(td64, np.timedelta64) + + # this is NOT equal and cannot be roundtripped (because of the nanos) + td = Timedelta("1 days, 10:11:12.012345678") + assert td != td.to_pytimedelta() + + def test_fields(self): + def check(value): + # that we are int + assert isinstance(value, int) + + # compat to datetime.timedelta + rng = to_timedelta("1 days, 10:11:12") + assert rng.days == 1 + assert rng.seconds == 10 * 3600 + 11 * 60 + 12 + assert rng.microseconds == 0 + assert rng.nanoseconds == 0 + + msg = "'Timedelta' object has no attribute '{}'" + with pytest.raises(AttributeError, match=msg.format("hours")): + rng.hours + with pytest.raises(AttributeError, match=msg.format("minutes")): + rng.minutes + with pytest.raises(AttributeError, match=msg.format("milliseconds")): + rng.milliseconds + + # GH 10050 + check(rng.days) + check(rng.seconds) + check(rng.microseconds) + check(rng.nanoseconds) + + td = Timedelta("-1 days, 10:11:12") + assert abs(td) == Timedelta("13:48:48") + assert str(td) == "-1 days +10:11:12" + assert -td == Timedelta("0 days 13:48:48") + assert -Timedelta("-1 days, 10:11:12")._value == 49728000000000 + assert Timedelta("-1 days, 10:11:12")._value == -49728000000000 + + rng = to_timedelta("-1 days, 10:11:12.100123456") + assert rng.days == -1 + assert rng.seconds == 10 * 3600 + 11 * 60 + 12 + assert rng.microseconds == 100 * 1000 + 123 + assert rng.nanoseconds == 456 + msg = "'Timedelta' object has no attribute '{}'" + with pytest.raises(AttributeError, match=msg.format("hours")): + rng.hours + with pytest.raises(AttributeError, match=msg.format("minutes")): + rng.minutes + with pytest.raises(AttributeError, match=msg.format("milliseconds")): + rng.milliseconds + + # components + tup = to_timedelta(-1, "us").components + assert tup.days == -1 + assert tup.hours == 23 + assert tup.minutes == 
59 + assert tup.seconds == 59 + assert tup.milliseconds == 999 + assert tup.microseconds == 999 + assert tup.nanoseconds == 0 + + # GH 10050 + check(tup.days) + check(tup.hours) + check(tup.minutes) + check(tup.seconds) + check(tup.milliseconds) + check(tup.microseconds) + check(tup.nanoseconds) + + tup = Timedelta("-1 days 1 us").components + assert tup.days == -2 + assert tup.hours == 23 + assert tup.minutes == 59 + assert tup.seconds == 59 + assert tup.milliseconds == 999 + assert tup.microseconds == 999 + assert tup.nanoseconds == 0 + + # TODO: this is a test of to_timedelta string parsing + def test_iso_conversion(self): + # GH #21877 + expected = Timedelta(1, unit="s") + assert to_timedelta("P0DT0H0M1S") == expected + + # TODO: this is a test of to_timedelta returning NaT + def test_nat_converters(self): + result = to_timedelta("nat").to_numpy() + assert result.dtype.kind == "M" + assert result.astype("int64") == iNaT + + result = to_timedelta("nan").to_numpy() + assert result.dtype.kind == "M" + assert result.astype("int64") == iNaT + + def test_numeric_conversions(self): + assert Timedelta(0) == np.timedelta64(0, "ns") + assert Timedelta(10) == np.timedelta64(10, "ns") + assert Timedelta(10, unit="ns") == np.timedelta64(10, "ns") + + assert Timedelta(10, unit="us") == np.timedelta64(10, "us") + assert Timedelta(10, unit="ms") == np.timedelta64(10, "ms") + assert Timedelta(10, unit="s") == np.timedelta64(10, "s") + assert Timedelta(10, unit="d") == np.timedelta64(10, "D") + + def test_timedelta_conversions(self): + assert Timedelta(timedelta(seconds=1)) == np.timedelta64(1, "s").astype( + "m8[ns]" + ) + assert Timedelta(timedelta(microseconds=1)) == np.timedelta64(1, "us").astype( + "m8[ns]" + ) + assert Timedelta(timedelta(days=1)) == np.timedelta64(1, "D").astype("m8[ns]") + + def test_to_numpy_alias(self): + # GH 24653: alias .to_numpy() for scalars + td = Timedelta("10m7s") + assert td.to_timedelta64() == td.to_numpy() + + # GH#44460 + msg = "dtype and 
copy arguments are ignored" + with pytest.raises(ValueError, match=msg): + td.to_numpy("m8[s]") + with pytest.raises(ValueError, match=msg): + td.to_numpy(copy=True) + + def test_identity(self): + td = Timedelta(10, unit="d") + assert isinstance(td, Timedelta) + assert isinstance(td, timedelta) + + def test_short_format_converters(self): + def conv(v): + return v.astype("m8[ns]") + + assert Timedelta("10") == np.timedelta64(10, "ns") + assert Timedelta("10ns") == np.timedelta64(10, "ns") + assert Timedelta("100") == np.timedelta64(100, "ns") + assert Timedelta("100ns") == np.timedelta64(100, "ns") + + assert Timedelta("1000") == np.timedelta64(1000, "ns") + assert Timedelta("1000ns") == np.timedelta64(1000, "ns") + assert Timedelta("1000NS") == np.timedelta64(1000, "ns") + + assert Timedelta("10us") == np.timedelta64(10000, "ns") + assert Timedelta("100us") == np.timedelta64(100000, "ns") + assert Timedelta("1000us") == np.timedelta64(1000000, "ns") + assert Timedelta("1000Us") == np.timedelta64(1000000, "ns") + assert Timedelta("1000uS") == np.timedelta64(1000000, "ns") + + assert Timedelta("1ms") == np.timedelta64(1000000, "ns") + assert Timedelta("10ms") == np.timedelta64(10000000, "ns") + assert Timedelta("100ms") == np.timedelta64(100000000, "ns") + assert Timedelta("1000ms") == np.timedelta64(1000000000, "ns") + + assert Timedelta("-1s") == -np.timedelta64(1000000000, "ns") + assert Timedelta("1s") == np.timedelta64(1000000000, "ns") + assert Timedelta("10s") == np.timedelta64(10000000000, "ns") + assert Timedelta("100s") == np.timedelta64(100000000000, "ns") + assert Timedelta("1000s") == np.timedelta64(1000000000000, "ns") + + assert Timedelta("1d") == conv(np.timedelta64(1, "D")) + assert Timedelta("-1d") == -conv(np.timedelta64(1, "D")) + assert Timedelta("1D") == conv(np.timedelta64(1, "D")) + assert Timedelta("10D") == conv(np.timedelta64(10, "D")) + assert Timedelta("100D") == conv(np.timedelta64(100, "D")) + assert Timedelta("1000D") == 
conv(np.timedelta64(1000, "D")) + assert Timedelta("10000D") == conv(np.timedelta64(10000, "D")) + + # space + assert Timedelta(" 10000D ") == conv(np.timedelta64(10000, "D")) + assert Timedelta(" - 10000D ") == -conv(np.timedelta64(10000, "D")) + + # invalid + msg = "invalid unit abbreviation" + with pytest.raises(ValueError, match=msg): + Timedelta("1foo") + msg = "unit abbreviation w/o a number" + with pytest.raises(ValueError, match=msg): + Timedelta("foo") + + def test_full_format_converters(self): + def conv(v): + return v.astype("m8[ns]") + + d1 = np.timedelta64(1, "D") + + assert Timedelta("1days") == conv(d1) + assert Timedelta("1days,") == conv(d1) + assert Timedelta("- 1days,") == -conv(d1) + + assert Timedelta("00:00:01") == conv(np.timedelta64(1, "s")) + assert Timedelta("06:00:01") == conv(np.timedelta64(6 * 3600 + 1, "s")) + assert Timedelta("06:00:01.0") == conv(np.timedelta64(6 * 3600 + 1, "s")) + assert Timedelta("06:00:01.01") == conv( + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms") + ) + + assert Timedelta("- 1days, 00:00:01") == conv(-d1 + np.timedelta64(1, "s")) + assert Timedelta("1days, 06:00:01") == conv( + d1 + np.timedelta64(6 * 3600 + 1, "s") + ) + assert Timedelta("1days, 06:00:01.01") == conv( + d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, "ms") + ) + + # invalid + msg = "have leftover units" + with pytest.raises(ValueError, match=msg): + Timedelta("- 1days, 00") + + def test_pickle(self): + v = Timedelta("1 days 10:11:12.0123456") + v_p = tm.round_trip_pickle(v) + assert v == v_p + + def test_timedelta_hash_equality(self): + # GH 11129 + v = Timedelta(1, "D") + td = timedelta(days=1) + assert hash(v) == hash(td) + + d = {td: 2} + assert d[v] == 2 + + tds = [Timedelta(seconds=1) + Timedelta(days=n) for n in range(20)] + assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds) + + # python timedeltas drop ns resolution + ns_td = Timedelta(1, "ns") + assert hash(ns_td) != hash(ns_td.to_pytimedelta()) + + 
@pytest.mark.skip_ubsan + @pytest.mark.xfail( + reason="pd.Timedelta violates the Python hash invariant (GH#44504).", + ) + @given( + st.integers( + min_value=(-sys.maxsize - 1) // 500, + max_value=sys.maxsize // 500, + ) + ) + def test_hash_equality_invariance(self, half_microseconds: int) -> None: + # GH#44504 + + nanoseconds = half_microseconds * 500 + + pandas_timedelta = Timedelta(nanoseconds) + numpy_timedelta = np.timedelta64(nanoseconds) + + # See: https://docs.python.org/3/glossary.html#term-hashable + # Hashable objects which compare equal must have the same hash value. + assert pandas_timedelta != numpy_timedelta or hash(pandas_timedelta) == hash( + numpy_timedelta + ) + + def test_implementation_limits(self): + min_td = Timedelta(Timedelta.min) + max_td = Timedelta(Timedelta.max) + + # GH 12727 + # timedelta limits correspond to int64 boundaries + assert min_td._value == iNaT + 1 + assert max_td._value == lib.i8max + + # Beyond lower limit, a NAT before the Overflow + assert (min_td - Timedelta(1, "ns")) is NaT + + msg = "int too (large|big) to convert" + with pytest.raises(OverflowError, match=msg): + min_td - Timedelta(2, "ns") + + with pytest.raises(OverflowError, match=msg): + max_td + Timedelta(1, "ns") + + # Same tests using the internal nanosecond values + td = Timedelta(min_td._value - 1, "ns") + assert td is NaT + + msg = "Cannot cast -9223372036854775809 from ns to 'ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(min_td._value - 2, "ns") + + msg = "Cannot cast 9223372036854775808 from ns to 'ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + Timedelta(max_td._value + 1, "ns") + + def test_total_seconds_precision(self): + # GH 19458 + assert Timedelta("30s").total_seconds() == 30.0 + assert Timedelta("0").total_seconds() == 0.0 + assert Timedelta("-2s").total_seconds() == -2.0 + assert Timedelta("5.324s").total_seconds() == 5.324 + assert (Timedelta("30s").total_seconds() - 
30.0) < 1e-20 + assert (30.0 - Timedelta("30s").total_seconds()) < 1e-20 + + def test_resolution_string(self): + assert Timedelta(days=1).resolution_string == "D" + assert Timedelta(days=1, hours=6).resolution_string == "h" + assert Timedelta(days=1, minutes=6).resolution_string == "min" + assert Timedelta(days=1, seconds=6).resolution_string == "s" + assert Timedelta(days=1, milliseconds=6).resolution_string == "ms" + assert Timedelta(days=1, microseconds=6).resolution_string == "us" + assert Timedelta(days=1, nanoseconds=6).resolution_string == "ns" + + def test_resolution_deprecated(self): + # GH#21344 + td = Timedelta(days=4, hours=3) + result = td.resolution + assert result == Timedelta(nanoseconds=1) + + # Check that the attribute is available on the class, mirroring + # the stdlib timedelta behavior + result = Timedelta.resolution + assert result == Timedelta(nanoseconds=1) + + +@pytest.mark.parametrize( + "value, expected", + [ + (Timedelta("10s"), True), + (Timedelta("-10s"), True), + (Timedelta(10, unit="ns"), True), + (Timedelta(0, unit="ns"), False), + (Timedelta(-10, unit="ns"), True), + (Timedelta(None), True), + (NaT, True), + ], +) +def test_truthiness(value, expected): + # https://github.com/pandas-dev/pandas/issues/21484 + assert bool(value) is expected + + +def test_timedelta_attribute_precision(): + # GH 31354 + td = Timedelta(1552211999999999872, unit="ns") + result = td.days * 86400 + result += td.seconds + result *= 1000000 + result += td.microseconds + result *= 1000 + result += td.nanoseconds + expected = td._value + assert result == expected diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5490851402382e4f8f5f9a5e420429084ddcd014 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_arithmetic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_arithmetic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb1a5b758596c1bd3161905d1fdbc7a0f5407b03 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_arithmetic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_comparisons.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_comparisons.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..335c7bdc8dc8c6bfbac0c6ae1e2a6cfefaf68d12 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_comparisons.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e8de4ce086a7dc3aeae9a52b3c11a671375bc74 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_formats.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_formats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9094cb211d3e4e84415be246062eaeca11fc29ef Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_formats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timestamp.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timestamp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6fc2946363088a7c42406e7835086b6f0363ffd Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timestamp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timezones.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timezones.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cc7295503baac42e6b6360bb30ac24571911063 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/scalar/timestamp/__pycache__/test_timezones.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/window/test_base_indexer.py b/venv/lib/python3.10/site-packages/pandas/tests/window/test_base_indexer.py new file mode 100644 index 0000000000000000000000000000000000000000..104acc1d527cb8dbd92b20211fb760dd413a0757 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_base_indexer.py @@ -0,0 +1,519 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, + concat, + date_range, +) +import pandas._testing as tm +from pandas.api.indexers import ( + BaseIndexer, + FixedForwardWindowIndexer, +) +from pandas.core.indexers.objects import ( + ExpandingIndexer, + FixedWindowIndexer, + VariableOffsetWindowIndexer, +) + +from pandas.tseries.offsets import BusinessDay + + +def test_bad_get_window_bounds_signature(): + class BadIndexer(BaseIndexer): + def get_window_bounds(self): + return None + 
+ indexer = BadIndexer() + with pytest.raises(ValueError, match="BadIndexer does not implement"): + Series(range(5)).rolling(indexer) + + +def test_expanding_indexer(): + s = Series(range(10)) + indexer = ExpandingIndexer() + result = s.rolling(indexer).mean() + expected = s.expanding().mean() + tm.assert_series_equal(result, expected) + + +def test_indexer_constructor_arg(): + # Example found in computation.rst + use_expanding = [True, False, True, False, True] + df = DataFrame({"values": range(5)}) + + class CustomIndexer(BaseIndexer): + def get_window_bounds(self, num_values, min_periods, center, closed, step): + start = np.empty(num_values, dtype=np.int64) + end = np.empty(num_values, dtype=np.int64) + for i in range(num_values): + if self.use_expanding[i]: + start[i] = 0 + end[i] = i + 1 + else: + start[i] = i + end[i] = i + self.window_size + return start, end + + indexer = CustomIndexer(window_size=1, use_expanding=use_expanding) + result = df.rolling(indexer).sum() + expected = DataFrame({"values": [0.0, 1.0, 3.0, 3.0, 10.0]}) + tm.assert_frame_equal(result, expected) + + +def test_indexer_accepts_rolling_args(): + df = DataFrame({"values": range(5)}) + + class CustomIndexer(BaseIndexer): + def get_window_bounds(self, num_values, min_periods, center, closed, step): + start = np.empty(num_values, dtype=np.int64) + end = np.empty(num_values, dtype=np.int64) + for i in range(num_values): + if ( + center + and min_periods == 1 + and closed == "both" + and step == 1 + and i == 2 + ): + start[i] = 0 + end[i] = num_values + else: + start[i] = i + end[i] = i + self.window_size + return start, end + + indexer = CustomIndexer(window_size=1) + result = df.rolling( + indexer, center=True, min_periods=1, closed="both", step=1 + ).sum() + expected = DataFrame({"values": [0.0, 1.0, 10.0, 3.0, 4.0]}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "func,np_func,expected,np_kwargs", + [ + ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 
2.0, np.nan], {}), + ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {}), + ( + "max", + np.max, + [2.0, 3.0, 4.0, 100.0, 100.0, 100.0, 8.0, 9.0, 9.0, np.nan], + {}, + ), + ( + "std", + np.std, + [ + 1.0, + 1.0, + 1.0, + 55.71654452, + 54.85739087, + 53.9845657, + 1.0, + 1.0, + 0.70710678, + np.nan, + ], + {"ddof": 1}, + ), + ( + "var", + np.var, + [ + 1.0, + 1.0, + 1.0, + 3104.333333, + 3009.333333, + 2914.333333, + 1.0, + 1.0, + 0.500000, + np.nan, + ], + {"ddof": 1}, + ), + ( + "median", + np.median, + [1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 8.5, np.nan], + {}, + ), + ], +) +def test_rolling_forward_window( + frame_or_series, func, np_func, expected, np_kwargs, step +): + # GH 32865 + values = np.arange(10.0) + values[5] = 100.0 + + indexer = FixedForwardWindowIndexer(window_size=3) + + match = "Forward-looking windows can't have center=True" + with pytest.raises(ValueError, match=match): + rolling = frame_or_series(values).rolling(window=indexer, center=True) + getattr(rolling, func)() + + match = "Forward-looking windows don't support setting the closed argument" + with pytest.raises(ValueError, match=match): + rolling = frame_or_series(values).rolling(window=indexer, closed="right") + getattr(rolling, func)() + + rolling = frame_or_series(values).rolling(window=indexer, min_periods=2, step=step) + result = getattr(rolling, func)() + + # Check that the function output matches the explicitly provided array + expected = frame_or_series(expected)[::step] + tm.assert_equal(result, expected) + + # Check that the rolling function output matches applying an alternative + # function to the rolling window object + expected2 = frame_or_series(rolling.apply(lambda x: np_func(x, **np_kwargs))) + tm.assert_equal(result, expected2) + + # Check that the function output matches applying an alternative function + # if min_periods isn't specified + # GH 39604: After count-min_periods deprecation, apply(lambda x: len(x)) + # is equivalent to count after 
setting min_periods=0 + min_periods = 0 if func == "count" else None + rolling3 = frame_or_series(values).rolling(window=indexer, min_periods=min_periods) + result3 = getattr(rolling3, func)() + expected3 = frame_or_series(rolling3.apply(lambda x: np_func(x, **np_kwargs))) + tm.assert_equal(result3, expected3) + + +def test_rolling_forward_skewness(frame_or_series, step): + values = np.arange(10.0) + values[5] = 100.0 + + indexer = FixedForwardWindowIndexer(window_size=5) + rolling = frame_or_series(values).rolling(window=indexer, min_periods=3, step=step) + result = rolling.skew() + + expected = frame_or_series( + [ + 0.0, + 2.232396, + 2.229508, + 2.228340, + 2.229091, + 2.231989, + 0.0, + 0.0, + np.nan, + np.nan, + ] + )[::step] + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "func,expected", + [ + ("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]), + ( + "corr", + [ + 1.0, + 1.0, + 1.0, + 0.8704775290207161, + 0.018229084250926637, + -0.861357304646493, + 1.0, + 1.0, + np.nan, + np.nan, + ], + ), + ], +) +def test_rolling_forward_cov_corr(func, expected): + values1 = np.arange(10).reshape(-1, 1) + values2 = values1 * 2 + values1[5, 0] = 100 + values = np.concatenate([values1, values2], axis=1) + + indexer = FixedForwardWindowIndexer(window_size=3) + rolling = DataFrame(values).rolling(window=indexer, min_periods=3) + # We are interested in checking only pairwise covariance / correlation + result = getattr(rolling, func)().loc[(slice(None), 1), 0] + result = result.reset_index(drop=True) + expected = Series(expected).reset_index(drop=True) + expected.name = result.name + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "closed,expected_data", + [ + ["right", [0.0, 1.0, 2.0, 3.0, 7.0, 12.0, 6.0, 7.0, 8.0, 9.0]], + ["left", [0.0, 0.0, 1.0, 2.0, 5.0, 9.0, 5.0, 6.0, 7.0, 8.0]], + ], +) +def test_non_fixed_variable_window_indexer(closed, expected_data): + index = date_range("2020", periods=10) + df = 
DataFrame(range(10), index=index) + offset = BusinessDay(1) + indexer = VariableOffsetWindowIndexer(index=index, offset=offset) + result = df.rolling(indexer, closed=closed).sum() + expected = DataFrame(expected_data, index=index) + tm.assert_frame_equal(result, expected) + + +def test_variableoffsetwindowindexer_not_dti(): + # GH 54379 + with pytest.raises(ValueError, match="index must be a DatetimeIndex."): + VariableOffsetWindowIndexer(index="foo", offset=BusinessDay(1)) + + +def test_variableoffsetwindowindexer_not_offset(): + # GH 54379 + idx = date_range("2020", periods=10) + with pytest.raises(ValueError, match="offset must be a DateOffset-like object."): + VariableOffsetWindowIndexer(index=idx, offset="foo") + + +def test_fixed_forward_indexer_count(step): + # GH: 35579 + df = DataFrame({"b": [None, None, None, 7]}) + indexer = FixedForwardWindowIndexer(window_size=2) + result = df.rolling(window=indexer, min_periods=0, step=step).count() + expected = DataFrame({"b": [0.0, 0.0, 1.0, 1.0]})[::step] + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + ("end_value", "values"), [(1, [0.0, 1, 1, 3, 2]), (-1, [0.0, 1, 0, 3, 1])] +) +@pytest.mark.parametrize(("func", "args"), [("median", []), ("quantile", [0.5])]) +def test_indexer_quantile_sum(end_value, values, func, args): + # GH 37153 + class CustomIndexer(BaseIndexer): + def get_window_bounds(self, num_values, min_periods, center, closed, step): + start = np.empty(num_values, dtype=np.int64) + end = np.empty(num_values, dtype=np.int64) + for i in range(num_values): + if self.use_expanding[i]: + start[i] = 0 + end[i] = max(i + end_value, 1) + else: + start[i] = i + end[i] = i + self.window_size + return start, end + + use_expanding = [True, False, True, False, True] + df = DataFrame({"values": range(5)}) + + indexer = CustomIndexer(window_size=1, use_expanding=use_expanding) + result = getattr(df.rolling(indexer), func)(*args) + expected = DataFrame({"values": values}) + 
tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "indexer_class", [FixedWindowIndexer, FixedForwardWindowIndexer, ExpandingIndexer] +) +@pytest.mark.parametrize("window_size", [1, 2, 12]) +@pytest.mark.parametrize( + "df_data", + [ + {"a": [1, 1], "b": [0, 1]}, + {"a": [1, 2], "b": [0, 1]}, + {"a": [1] * 16, "b": [np.nan, 1, 2, np.nan] + list(range(4, 16))}, + ], +) +def test_indexers_are_reusable_after_groupby_rolling( + indexer_class, window_size, df_data +): + # GH 43267 + df = DataFrame(df_data) + num_trials = 3 + indexer = indexer_class(window_size=window_size) + original_window_size = indexer.window_size + for i in range(num_trials): + df.groupby("a")["b"].rolling(window=indexer, min_periods=1).mean() + assert indexer.window_size == original_window_size + + +@pytest.mark.parametrize( + "window_size, num_values, expected_start, expected_end", + [ + (1, 1, [0], [1]), + (1, 2, [0, 1], [1, 2]), + (2, 1, [0], [1]), + (2, 2, [0, 1], [2, 2]), + (5, 12, range(12), list(range(5, 12)) + [12] * 5), + (12, 5, range(5), [5] * 5), + (0, 0, np.array([]), np.array([])), + (1, 0, np.array([]), np.array([])), + (0, 1, [0], [0]), + ], +) +def test_fixed_forward_indexer_bounds( + window_size, num_values, expected_start, expected_end, step +): + # GH 43267 + indexer = FixedForwardWindowIndexer(window_size=window_size) + start, end = indexer.get_window_bounds(num_values=num_values, step=step) + + tm.assert_numpy_array_equal( + start, np.array(expected_start[::step]), check_dtype=False + ) + tm.assert_numpy_array_equal(end, np.array(expected_end[::step]), check_dtype=False) + assert len(start) == len(end) + + +@pytest.mark.parametrize( + "df, window_size, expected", + [ + ( + DataFrame({"b": [0, 1, 2], "a": [1, 2, 2]}), + 2, + Series( + [0, 1.5, 2.0], + index=MultiIndex.from_arrays([[1, 2, 2], range(3)], names=["a", None]), + name="b", + dtype=np.float64, + ), + ), + ( + DataFrame( + { + "b": [np.nan, 1, 2, np.nan] + list(range(4, 18)), + "a": [1] * 7 + [2] 
* 11, + "c": range(18), + } + ), + 12, + Series( + [ + 3.6, + 3.6, + 4.25, + 5.0, + 5.0, + 5.5, + 6.0, + 12.0, + 12.5, + 13.0, + 13.5, + 14.0, + 14.5, + 15.0, + 15.5, + 16.0, + 16.5, + 17.0, + ], + index=MultiIndex.from_arrays( + [[1] * 7 + [2] * 11, range(18)], names=["a", None] + ), + name="b", + dtype=np.float64, + ), + ), + ], +) +def test_rolling_groupby_with_fixed_forward_specific(df, window_size, expected): + # GH 43267 + indexer = FixedForwardWindowIndexer(window_size=window_size) + result = df.groupby("a")["b"].rolling(window=indexer, min_periods=1).mean() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "group_keys", + [ + (1,), + (1, 2), + (2, 1), + (1, 1, 2), + (1, 2, 1), + (1, 1, 2, 2), + (1, 2, 3, 2, 3), + (1, 1, 2) * 4, + (1, 2, 3) * 5, + ], +) +@pytest.mark.parametrize("window_size", [1, 2, 3, 4, 5, 8, 20]) +def test_rolling_groupby_with_fixed_forward_many(group_keys, window_size): + # GH 43267 + df = DataFrame( + { + "a": np.array(list(group_keys)), + "b": np.arange(len(group_keys), dtype=np.float64) + 17, + "c": np.arange(len(group_keys), dtype=np.int64), + } + ) + + indexer = FixedForwardWindowIndexer(window_size=window_size) + result = df.groupby("a")["b"].rolling(window=indexer, min_periods=1).sum() + result.index.names = ["a", "c"] + + groups = df.groupby("a")[["a", "b", "c"]] + manual = concat( + [ + g.assign( + b=[ + g["b"].iloc[i : i + window_size].sum(min_count=1) + for i in range(len(g)) + ] + ) + for _, g in groups + ] + ) + manual = manual.set_index(["a", "c"])["b"] + + tm.assert_series_equal(result, manual) + + +def test_unequal_start_end_bounds(): + class CustomIndexer(BaseIndexer): + def get_window_bounds(self, num_values, min_periods, center, closed, step): + return np.array([1]), np.array([1, 2]) + + indexer = CustomIndexer() + roll = Series(1).rolling(indexer) + match = "start" + with pytest.raises(ValueError, match=match): + roll.mean() + + with pytest.raises(ValueError, match=match): + next(iter(roll)) 
+ + with pytest.raises(ValueError, match=match): + roll.corr(pairwise=True) + + with pytest.raises(ValueError, match=match): + roll.cov(pairwise=True) + + +def test_unequal_bounds_to_object(): + # GH 44470 + class CustomIndexer(BaseIndexer): + def get_window_bounds(self, num_values, min_periods, center, closed, step): + return np.array([1]), np.array([2]) + + indexer = CustomIndexer() + roll = Series([1, 1]).rolling(indexer) + match = "start and end" + with pytest.raises(ValueError, match=match): + roll.mean() + + with pytest.raises(ValueError, match=match): + next(iter(roll)) + + with pytest.raises(ValueError, match=match): + roll.corr(pairwise=True) + + with pytest.raises(ValueError, match=match): + roll.cov(pairwise=True) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/window/test_ewm.py b/venv/lib/python3.10/site-packages/pandas/tests/window/test_ewm.py new file mode 100644 index 0000000000000000000000000000000000000000..058e5ce36e53e8fc43355ffc86a2336316ab09a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_ewm.py @@ -0,0 +1,727 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Series, + date_range, +) +import pandas._testing as tm + + +def test_doc_string(): + df = DataFrame({"B": [0, 1, 2, np.nan, 4]}) + df + df.ewm(com=0.5).mean() + + +def test_constructor(frame_or_series): + c = frame_or_series(range(5)).ewm + + # valid + c(com=0.5) + c(span=1.5) + c(alpha=0.5) + c(halflife=0.75) + c(com=0.5, span=None) + c(alpha=0.5, com=None) + c(halflife=0.75, alpha=None) + + # not valid: mutually exclusive + msg = "comass, span, halflife, and alpha are mutually exclusive" + with pytest.raises(ValueError, match=msg): + c(com=0.5, alpha=0.5) + with pytest.raises(ValueError, match=msg): + c(span=1.5, halflife=0.75) + with pytest.raises(ValueError, match=msg): + c(alpha=0.5, span=1.5) + + # not valid: com < 0 + msg = "comass must satisfy: comass >= 0" + with pytest.raises(ValueError, 
match=msg): + c(com=-0.5) + + # not valid: span < 1 + msg = "span must satisfy: span >= 1" + with pytest.raises(ValueError, match=msg): + c(span=0.5) + + # not valid: halflife <= 0 + msg = "halflife must satisfy: halflife > 0" + with pytest.raises(ValueError, match=msg): + c(halflife=0) + + # not valid: alpha <= 0 or alpha > 1 + msg = "alpha must satisfy: 0 < alpha <= 1" + for alpha in (-0.5, 1.5): + with pytest.raises(ValueError, match=msg): + c(alpha=alpha) + + +def test_ewma_times_not_datetime_type(): + msg = r"times must be datetime64 dtype." + with pytest.raises(ValueError, match=msg): + Series(range(5)).ewm(times=np.arange(5)) + + +def test_ewma_times_not_same_length(): + msg = "times must be the same length as the object." + with pytest.raises(ValueError, match=msg): + Series(range(5)).ewm(times=np.arange(4).astype("datetime64[ns]")) + + +def test_ewma_halflife_not_correct_type(): + msg = "halflife must be a timedelta convertible object" + with pytest.raises(ValueError, match=msg): + Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]")) + + +def test_ewma_halflife_without_times(halflife_with_times): + msg = "halflife can only be a timedelta convertible argument if times is not None." 
+ with pytest.raises(ValueError, match=msg): + Series(range(5)).ewm(halflife=halflife_with_times) + + +@pytest.mark.parametrize( + "times", + [ + np.arange(10).astype("datetime64[D]").astype("datetime64[ns]"), + date_range("2000", freq="D", periods=10), + date_range("2000", freq="D", periods=10).tz_localize("UTC"), + ], +) +@pytest.mark.parametrize("min_periods", [0, 2]) +def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods): + halflife = halflife_with_times + data = np.arange(10.0) + data[::2] = np.nan + df = DataFrame({"A": data}) + result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean() + expected = df.ewm(halflife=1.0, min_periods=min_periods).mean() + tm.assert_frame_equal(result, expected) + + +def test_ewma_with_times_variable_spacing(tz_aware_fixture, unit): + tz = tz_aware_fixture + halflife = "23 days" + times = ( + DatetimeIndex(["2020-01-01", "2020-01-10T00:04:05", "2020-02-23T05:00:23"]) + .tz_localize(tz) + .as_unit(unit) + ) + data = np.arange(3) + df = DataFrame(data) + result = df.ewm(halflife=halflife, times=times).mean() + expected = DataFrame([0.0, 0.5674161888241773, 1.545239952073459]) + tm.assert_frame_equal(result, expected) + + +def test_ewm_with_nat_raises(halflife_with_times): + # GH#38535 + ser = Series(range(1)) + times = DatetimeIndex(["NaT"]) + with pytest.raises(ValueError, match="Cannot convert NaT values to integer"): + ser.ewm(com=0.1, halflife=halflife_with_times, times=times) + + +def test_ewm_with_times_getitem(halflife_with_times): + # GH 40164 + halflife = halflife_with_times + data = np.arange(10.0) + data[::2] = np.nan + times = date_range("2000", freq="D", periods=10) + df = DataFrame({"A": data, "B": data}) + result = df.ewm(halflife=halflife, times=times)["A"].mean() + expected = df.ewm(halflife=1.0)["A"].mean() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("arg", ["com", "halflife", "span", "alpha"]) +def 
test_ewm_getitem_attributes_retained(arg, adjust, ignore_na): + # GH 40164 + kwargs = {arg: 1, "adjust": adjust, "ignore_na": ignore_na} + ewm = DataFrame({"A": range(1), "B": range(1)}).ewm(**kwargs) + expected = {attr: getattr(ewm, attr) for attr in ewm._attributes} + ewm_slice = ewm["A"] + result = {attr: getattr(ewm, attr) for attr in ewm_slice._attributes} + assert result == expected + + +def test_ewma_times_adjust_false_raises(): + # GH 40098 + with pytest.raises( + NotImplementedError, match="times is not supported with adjust=False." + ): + Series(range(1)).ewm( + 0.1, adjust=False, times=date_range("2000", freq="D", periods=1) + ) + + +@pytest.mark.parametrize( + "func, expected", + [ + [ + "mean", + DataFrame( + { + 0: range(5), + 1: range(4, 9), + 2: [7.428571, 9, 10.571429, 12.142857, 13.714286], + }, + dtype=float, + ), + ], + [ + "std", + DataFrame( + { + 0: [np.nan] * 5, + 1: [4.242641] * 5, + 2: [4.6291, 5.196152, 5.781745, 6.380775, 6.989788], + } + ), + ], + [ + "var", + DataFrame( + { + 0: [np.nan] * 5, + 1: [18.0] * 5, + 2: [21.428571, 27, 33.428571, 40.714286, 48.857143], + } + ), + ], + ], +) +def test_float_dtype_ewma(func, expected, float_numpy_dtype): + # GH#42452 + + df = DataFrame( + {0: range(5), 1: range(6, 11), 2: range(10, 20, 2)}, dtype=float_numpy_dtype + ) + msg = "Support for axis=1 in DataFrame.ewm is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + e = df.ewm(alpha=0.5, axis=1) + result = getattr(e, func)() + + tm.assert_frame_equal(result, expected) + + +def test_times_string_col_raises(): + # GH 43265 + df = DataFrame( + {"A": np.arange(10.0), "time_col": date_range("2000", freq="D", periods=10)} + ) + with pytest.raises(ValueError, match="times must be datetime64"): + df.ewm(halflife="1 day", min_periods=0, times="time_col") + + +def test_ewm_sum_adjust_false_notimplemented(): + data = Series(range(1)).ewm(com=1, adjust=False) + with pytest.raises(NotImplementedError, match="sum is not"): + 
data.sum() + + +@pytest.mark.parametrize( + "expected_data, ignore", + [[[10.0, 5.0, 2.5, 11.25], False], [[10.0, 5.0, 5.0, 12.5], True]], +) +def test_ewm_sum(expected_data, ignore): + # xref from Numbagg tests + # https://github.com/numbagg/numbagg/blob/v0.2.1/numbagg/test/test_moving.py#L50 + data = Series([10, 0, np.nan, 10]) + result = data.ewm(alpha=0.5, ignore_na=ignore).sum() + expected = Series(expected_data) + tm.assert_series_equal(result, expected) + + +def test_ewma_adjust(): + vals = Series(np.zeros(1000)) + vals[5] = 1 + result = vals.ewm(span=100, adjust=False).mean().sum() + assert np.abs(result - 1) < 1e-2 + + +def test_ewma_cases(adjust, ignore_na): + # try adjust/ignore_na args matrix + + s = Series([1.0, 2.0, 4.0, 8.0]) + + if adjust: + expected = Series([1.0, 1.6, 2.736842, 4.923077]) + else: + expected = Series([1.0, 1.333333, 2.222222, 4.148148]) + + result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() + tm.assert_series_equal(result, expected) + + +def test_ewma_nan_handling(): + s = Series([1.0] + [np.nan] * 5 + [1.0]) + result = s.ewm(com=5).mean() + tm.assert_series_equal(result, Series([1.0] * len(s))) + + s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0]) + result = s.ewm(com=5).mean() + tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4)) + + +@pytest.mark.parametrize( + "s, adjust, ignore_na, w", + [ + ( + Series([np.nan, 1.0, 101.0]), + True, + False, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0], + ), + ( + Series([np.nan, 1.0, 101.0]), + True, + True, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0], + ), + ( + Series([np.nan, 1.0, 101.0]), + False, + False, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))], + ), + ( + Series([np.nan, 1.0, 101.0]), + False, + True, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))], + ), + ( + Series([1.0, np.nan, 101.0]), + True, + False, + [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0], + ), + ( + Series([1.0, np.nan, 101.0]), + True, + True, + 
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0], + ), + ( + Series([1.0, np.nan, 101.0]), + False, + False, + [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))], + ), + ( + Series([1.0, np.nan, 101.0]), + False, + True, + [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))], + ), + ( + Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), + True, + False, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan], + ), + ( + Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), + True, + True, + [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan], + ), + ( + Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), + False, + False, + [ + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))) ** 3, + np.nan, + np.nan, + (1.0 / (1.0 + 2.0)), + np.nan, + ], + ), + ( + Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]), + False, + True, + [ + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))), + np.nan, + np.nan, + (1.0 / (1.0 + 2.0)), + np.nan, + ], + ), + ( + Series([1.0, np.nan, 101.0, 50.0]), + True, + False, + [ + (1.0 - (1.0 / (1.0 + 2.0))) ** 3, + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))), + 1.0, + ], + ), + ( + Series([1.0, np.nan, 101.0, 50.0]), + True, + True, + [ + (1.0 - (1.0 / (1.0 + 2.0))) ** 2, + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))), + 1.0, + ], + ), + ( + Series([1.0, np.nan, 101.0, 50.0]), + False, + False, + [ + (1.0 - (1.0 / (1.0 + 2.0))) ** 3, + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)), + (1.0 / (1.0 + 2.0)) + * ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))), + ], + ), + ( + Series([1.0, np.nan, 101.0, 50.0]), + False, + True, + [ + (1.0 - (1.0 / (1.0 + 2.0))) ** 2, + np.nan, + (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)), + (1.0 / (1.0 + 2.0)), + ], + ), + ], +) +def test_ewma_nan_handling_cases(s, adjust, ignore_na, w): + # GH 7603 + expected = (s.multiply(w).cumsum() / Series(w).cumsum()).ffill() + result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean() + + tm.assert_series_equal(result, 
expected) + if ignore_na is False: + # check that ignore_na defaults to False + result = s.ewm(com=2.0, adjust=adjust).mean() + tm.assert_series_equal(result, expected) + + +def test_ewm_alpha(): + # GH 10789 + arr = np.random.default_rng(2).standard_normal(100) + locs = np.arange(20, 40) + arr[locs] = np.nan + + s = Series(arr) + a = s.ewm(alpha=0.61722699889169674).mean() + b = s.ewm(com=0.62014947789973052).mean() + c = s.ewm(span=2.240298955799461).mean() + d = s.ewm(halflife=0.721792864318).mean() + tm.assert_series_equal(a, b) + tm.assert_series_equal(a, c) + tm.assert_series_equal(a, d) + + +def test_ewm_domain_checks(): + # GH 12492 + arr = np.random.default_rng(2).standard_normal(100) + locs = np.arange(20, 40) + arr[locs] = np.nan + + s = Series(arr) + msg = "comass must satisfy: comass >= 0" + with pytest.raises(ValueError, match=msg): + s.ewm(com=-0.1) + s.ewm(com=0.0) + s.ewm(com=0.1) + + msg = "span must satisfy: span >= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(span=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.0) + with pytest.raises(ValueError, match=msg): + s.ewm(span=0.9) + s.ewm(span=1.0) + s.ewm(span=1.1) + + msg = "halflife must satisfy: halflife > 0" + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=0.0) + s.ewm(halflife=0.1) + + msg = "alpha must satisfy: 0 < alpha <= 1" + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=-0.1) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=0.0) + s.ewm(alpha=0.1) + s.ewm(alpha=1.0) + with pytest.raises(ValueError, match=msg): + s.ewm(alpha=1.1) + + +@pytest.mark.parametrize("method", ["mean", "std", "var"]) +def test_ew_empty_series(method): + vals = Series([], dtype=np.float64) + + ewm = vals.ewm(3) + result = getattr(ewm, method)() + tm.assert_almost_equal(result, vals) + + +@pytest.mark.parametrize("min_periods", [0, 1]) +@pytest.mark.parametrize("name", ["mean", "var", 
"std"]) +def test_ew_min_periods(min_periods, name): + # excluding NaNs correctly + arr = np.random.default_rng(2).standard_normal(50) + arr[:10] = np.nan + arr[-10:] = np.nan + s = Series(arr) + + # check min_periods + # GH 7898 + result = getattr(s.ewm(com=50, min_periods=2), name)() + assert result[:11].isna().all() + assert not result[11:].isna().any() + + result = getattr(s.ewm(com=50, min_periods=min_periods), name)() + if name == "mean": + assert result[:10].isna().all() + assert not result[10:].isna().any() + else: + # ewm.std, ewm.var (with bias=False) require at least + # two values + assert result[:11].isna().all() + assert not result[11:].isna().any() + + # check series of length 0 + result = getattr(Series(dtype=object).ewm(com=50, min_periods=min_periods), name)() + tm.assert_series_equal(result, Series(dtype="float64")) + + # check series of length 1 + result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)() + if name == "mean": + tm.assert_series_equal(result, Series([1.0])) + else: + # ewm.std, ewm.var with bias=False require at least + # two values + tm.assert_series_equal(result, Series([np.nan])) + + # pass in ints + result2 = getattr(Series(np.arange(50)).ewm(span=10), name)() + assert result2.dtype == np.float64 + + +@pytest.mark.parametrize("name", ["cov", "corr"]) +def test_ewm_corr_cov(name): + A = Series(np.random.default_rng(2).standard_normal(50), index=range(50)) + B = A[2:] + np.random.default_rng(2).standard_normal(48) + + A[:10] = np.nan + B.iloc[-10:] = np.nan + + result = getattr(A.ewm(com=20, min_periods=5), name)(B) + assert np.isnan(result.values[:14]).all() + assert not np.isnan(result.values[14:]).any() + + +@pytest.mark.parametrize("min_periods", [0, 1, 2]) +@pytest.mark.parametrize("name", ["cov", "corr"]) +def test_ewm_corr_cov_min_periods(name, min_periods): + # GH 7898 + A = Series(np.random.default_rng(2).standard_normal(50), index=range(50)) + B = A[2:] + np.random.default_rng(2).standard_normal(48) + + 
A[:10] = np.nan + B.iloc[-10:] = np.nan + + result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B) + # binary functions (ewmcov, ewmcorr) with bias=False require at + # least two values + assert np.isnan(result.values[:11]).all() + assert not np.isnan(result.values[11:]).any() + + # check series of length 0 + empty = Series([], dtype=np.float64) + result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty) + tm.assert_series_equal(result, empty) + + # check series of length 1 + result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)( + Series([1.0]) + ) + tm.assert_series_equal(result, Series([np.nan])) + + +@pytest.mark.parametrize("name", ["cov", "corr"]) +def test_different_input_array_raise_exception(name): + A = Series(np.random.default_rng(2).standard_normal(50), index=range(50)) + A[:10] = np.nan + + msg = "other must be a DataFrame or Series" + # exception raised is Exception + with pytest.raises(ValueError, match=msg): + getattr(A.ewm(com=20, min_periods=5), name)( + np.random.default_rng(2).standard_normal(50) + ) + + +@pytest.mark.parametrize("name", ["var", "std", "mean"]) +def test_ewma_series(series, name): + series_result = getattr(series.ewm(com=10), name)() + assert isinstance(series_result, Series) + + +@pytest.mark.parametrize("name", ["var", "std", "mean"]) +def test_ewma_frame(frame, name): + frame_result = getattr(frame.ewm(com=10), name)() + assert isinstance(frame_result, DataFrame) + + +def test_ewma_span_com_args(series): + A = series.ewm(com=9.5).mean() + B = series.ewm(span=20).mean() + tm.assert_almost_equal(A, B) + msg = "comass, span, halflife, and alpha are mutually exclusive" + with pytest.raises(ValueError, match=msg): + series.ewm(com=9.5, span=20) + + msg = "Must pass one of comass, span, halflife, or alpha" + with pytest.raises(ValueError, match=msg): + series.ewm().mean() + + +def test_ewma_halflife_arg(series): + A = series.ewm(com=13.932726172912965).mean() + B = 
series.ewm(halflife=10.0).mean() + tm.assert_almost_equal(A, B) + msg = "comass, span, halflife, and alpha are mutually exclusive" + with pytest.raises(ValueError, match=msg): + series.ewm(span=20, halflife=50) + with pytest.raises(ValueError, match=msg): + series.ewm(com=9.5, halflife=50) + with pytest.raises(ValueError, match=msg): + series.ewm(com=9.5, span=20, halflife=50) + msg = "Must pass one of comass, span, halflife, or alpha" + with pytest.raises(ValueError, match=msg): + series.ewm() + + +def test_ewm_alpha_arg(series): + # GH 10789 + s = series + msg = "Must pass one of comass, span, halflife, or alpha" + with pytest.raises(ValueError, match=msg): + s.ewm() + + msg = "comass, span, halflife, and alpha are mutually exclusive" + with pytest.raises(ValueError, match=msg): + s.ewm(com=10.0, alpha=0.5) + with pytest.raises(ValueError, match=msg): + s.ewm(span=10.0, alpha=0.5) + with pytest.raises(ValueError, match=msg): + s.ewm(halflife=10.0, alpha=0.5) + + +@pytest.mark.parametrize("func", ["cov", "corr"]) +def test_ewm_pairwise_cov_corr(func, frame): + result = getattr(frame.ewm(span=10, min_periods=5), func)() + result = result.loc[(slice(None), 1), 5] + result.index = result.index.droplevel(1) + expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5]) + tm.assert_series_equal(result, expected, check_names=False) + + +def test_numeric_only_frame(arithmetic_win_operators, numeric_only): + # GH#46560 + kernel = arithmetic_win_operators + df = DataFrame({"a": [1], "b": 2, "c": 3}) + df["c"] = df["c"].astype(object) + ewm = df.ewm(span=2, min_periods=1) + op = getattr(ewm, kernel, None) + if op is not None: + result = op(numeric_only=numeric_only) + + columns = ["a", "b"] if numeric_only else ["a", "b", "c"] + expected = df[columns].agg([kernel]).reset_index(drop=True).astype(float) + assert list(expected.columns) == columns + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("kernel", ["corr", "cov"]) 
+@pytest.mark.parametrize("use_arg", [True, False]) +def test_numeric_only_corr_cov_frame(kernel, numeric_only, use_arg): + # GH#46560 + df = DataFrame({"a": [1, 2, 3], "b": 2, "c": 3}) + df["c"] = df["c"].astype(object) + arg = (df,) if use_arg else () + ewm = df.ewm(span=2, min_periods=1) + op = getattr(ewm, kernel) + result = op(*arg, numeric_only=numeric_only) + + # Compare result to op using float dtypes, dropping c when numeric_only is True + columns = ["a", "b"] if numeric_only else ["a", "b", "c"] + df2 = df[columns].astype(float) + arg2 = (df2,) if use_arg else () + ewm2 = df2.ewm(span=2, min_periods=1) + op2 = getattr(ewm2, kernel) + expected = op2(*arg2, numeric_only=numeric_only) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", [int, object]) +def test_numeric_only_series(arithmetic_win_operators, numeric_only, dtype): + # GH#46560 + kernel = arithmetic_win_operators + ser = Series([1], dtype=dtype) + ewm = ser.ewm(span=2, min_periods=1) + op = getattr(ewm, kernel, None) + if op is None: + # Nothing to test + pytest.skip("No op to test") + if numeric_only and dtype is object: + msg = f"ExponentialMovingWindow.{kernel} does not implement numeric_only" + with pytest.raises(NotImplementedError, match=msg): + op(numeric_only=numeric_only) + else: + result = op(numeric_only=numeric_only) + expected = ser.agg([kernel]).reset_index(drop=True).astype(float) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("kernel", ["corr", "cov"]) +@pytest.mark.parametrize("use_arg", [True, False]) +@pytest.mark.parametrize("dtype", [int, object]) +def test_numeric_only_corr_cov_series(kernel, use_arg, numeric_only, dtype): + # GH#46560 + ser = Series([1, 2, 3], dtype=dtype) + arg = (ser,) if use_arg else () + ewm = ser.ewm(span=2, min_periods=1) + op = getattr(ewm, kernel) + if numeric_only and dtype is object: + msg = f"ExponentialMovingWindow.{kernel} does not implement numeric_only" + with 
pytest.raises(NotImplementedError, match=msg): + op(*arg, numeric_only=numeric_only) + else: + result = op(*arg, numeric_only=numeric_only) + + ser2 = ser.astype(float) + arg2 = (ser2,) if use_arg else () + ewm2 = ser2.ewm(span=2, min_periods=1) + op2 = getattr(ewm2, kernel) + expected = op2(*arg2, numeric_only=numeric_only) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/window/test_groupby.py b/venv/lib/python3.10/site-packages/pandas/tests/window/test_groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..45e7e07affd75a707c076e225fe6ddc0fac5da4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_groupby.py @@ -0,0 +1,1318 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + Series, + Timestamp, + date_range, + to_datetime, +) +import pandas._testing as tm +from pandas.api.indexers import BaseIndexer +from pandas.core.groupby.groupby import get_groupby + + +@pytest.fixture +def times_frame(): + """Frame for testing times argument in EWM groupby.""" + return DataFrame( + { + "A": ["a", "b", "c", "a", "b", "c", "a", "b", "c", "a"], + "B": [0, 0, 0, 1, 1, 1, 2, 2, 2, 3], + "C": to_datetime( + [ + "2020-01-01", + "2020-01-01", + "2020-01-01", + "2020-01-02", + "2020-01-10", + "2020-01-22", + "2020-01-03", + "2020-01-23", + "2020-01-23", + "2020-01-04", + ] + ), + } + ) + + +@pytest.fixture +def roll_frame(): + return DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)}) + + +class TestRolling: + def test_groupby_unsupported_argument(self, roll_frame): + msg = r"groupby\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + roll_frame.groupby("A", foo=1) + + def test_getitem(self, roll_frame): + g = roll_frame.groupby("A") + g_mutated = get_groupby(roll_frame, by="A") + + expected = g_mutated.B.apply(lambda x: x.rolling(2).mean()) + + result = 
g.rolling(2).mean().B + tm.assert_series_equal(result, expected) + + result = g.rolling(2).B.mean() + tm.assert_series_equal(result, expected) + + result = g.B.rolling(2).mean() + tm.assert_series_equal(result, expected) + + result = roll_frame.B.groupby(roll_frame.A).rolling(2).mean() + tm.assert_series_equal(result, expected) + + def test_getitem_multiple(self, roll_frame): + # GH 13174 + g = roll_frame.groupby("A") + r = g.rolling(2, min_periods=0) + g_mutated = get_groupby(roll_frame, by="A") + expected = g_mutated.B.apply(lambda x: x.rolling(2, min_periods=0).count()) + + result = r.B.count() + tm.assert_series_equal(result, expected) + + result = r.B.count() + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "f", + [ + "sum", + "mean", + "min", + "max", + "count", + "kurt", + "skew", + ], + ) + def test_rolling(self, f, roll_frame): + g = roll_frame.groupby("A", group_keys=False) + r = g.rolling(window=4) + + result = getattr(r, f)() + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(lambda x: getattr(x.rolling(4), f)()) + # groupby.apply doesn't drop the grouped-by column + expected = expected.drop("A", axis=1) + # GH 39732 + expected_index = MultiIndex.from_arrays([roll_frame["A"], range(40)]) + expected.index = expected_index + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("f", ["std", "var"]) + def test_rolling_ddof(self, f, roll_frame): + g = roll_frame.groupby("A", group_keys=False) + r = g.rolling(window=4) + + result = getattr(r, f)(ddof=1) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1)) + # groupby.apply doesn't drop the grouped-by column + expected = expected.drop("A", axis=1) + # GH 39732 + expected_index = MultiIndex.from_arrays([roll_frame["A"], 
range(40)]) + expected.index = expected_index + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"] + ) + def test_rolling_quantile(self, interpolation, roll_frame): + g = roll_frame.groupby("A", group_keys=False) + r = g.rolling(window=4) + + result = r.quantile(0.4, interpolation=interpolation) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply( + lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation) + ) + # groupby.apply doesn't drop the grouped-by column + expected = expected.drop("A", axis=1) + # GH 39732 + expected_index = MultiIndex.from_arrays([roll_frame["A"], range(40)]) + expected.index = expected_index + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("f, expected_val", [["corr", 1], ["cov", 0.5]]) + def test_rolling_corr_cov_other_same_size_as_groups(self, f, expected_val): + # GH 42915 + df = DataFrame( + {"value": range(10), "idx1": [1] * 5 + [2] * 5, "idx2": [1, 2, 3, 4, 5] * 2} + ).set_index(["idx1", "idx2"]) + other = DataFrame({"value": range(5), "idx2": [1, 2, 3, 4, 5]}).set_index( + "idx2" + ) + result = getattr(df.groupby(level=0).rolling(2), f)(other) + expected_data = ([np.nan] + [expected_val] * 4) * 2 + expected = DataFrame( + expected_data, + columns=["value"], + index=MultiIndex.from_arrays( + [ + [1] * 5 + [2] * 5, + [1] * 5 + [2] * 5, + list(range(1, 6)) * 2, + ], + names=["idx1", "idx1", "idx2"], + ), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("f", ["corr", "cov"]) + def test_rolling_corr_cov_other_diff_size_as_groups(self, f, roll_frame): + g = roll_frame.groupby("A") + r = g.rolling(window=4) + + result = getattr(r, f)(roll_frame) + + def func(x): + return getattr(x.rolling(4), f)(roll_frame) + + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with 
tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(func) + # GH 39591: The grouped column should be all np.nan + # (groupby.apply inserts 0s for cov) + expected["A"] = np.nan + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("f", ["corr", "cov"]) + def test_rolling_corr_cov_pairwise(self, f, roll_frame): + g = roll_frame.groupby("A") + r = g.rolling(window=4) + + result = getattr(r.B, f)(pairwise=True) + + def func(x): + return getattr(x.B.rolling(4), f)(pairwise=True) + + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(func) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "func, expected_values", + [("cov", [[1.0, 1.0], [1.0, 4.0]]), ("corr", [[1.0, 0.5], [0.5, 1.0]])], + ) + def test_rolling_corr_cov_unordered(self, func, expected_values): + # GH 43386 + df = DataFrame( + { + "a": ["g1", "g2", "g1", "g1"], + "b": [0, 0, 1, 2], + "c": [2, 0, 6, 4], + } + ) + rol = df.groupby("a").rolling(3) + result = getattr(rol, func)() + expected = DataFrame( + { + "b": 4 * [np.nan] + expected_values[0] + 2 * [np.nan], + "c": 4 * [np.nan] + expected_values[1] + 2 * [np.nan], + }, + index=MultiIndex.from_tuples( + [ + ("g1", 0, "b"), + ("g1", 0, "c"), + ("g1", 2, "b"), + ("g1", 2, "c"), + ("g1", 3, "b"), + ("g1", 3, "c"), + ("g2", 1, "b"), + ("g2", 1, "c"), + ], + names=["a", None, None], + ), + ) + tm.assert_frame_equal(result, expected) + + def test_rolling_apply(self, raw, roll_frame): + g = roll_frame.groupby("A", group_keys=False) + r = g.rolling(window=4) + + # reduction + result = r.apply(lambda x: x.sum(), raw=raw) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw)) + # groupby.apply doesn't drop the grouped-by column + expected = 
expected.drop("A", axis=1) + # GH 39732 + expected_index = MultiIndex.from_arrays([roll_frame["A"], range(40)]) + expected.index = expected_index + tm.assert_frame_equal(result, expected) + + def test_rolling_apply_mutability(self): + # GH 14013 + df = DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6}) + g = df.groupby("A") + + mi = MultiIndex.from_tuples( + [("bar", 3), ("bar", 4), ("bar", 5), ("foo", 0), ("foo", 1), ("foo", 2)] + ) + + mi.names = ["A", None] + # Grouped column should not be a part of the output + expected = DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi) + + result = g.rolling(window=2).sum() + tm.assert_frame_equal(result, expected) + + # Call an arbitrary function on the groupby + g.sum() + + # Make sure nothing has been mutated + result = g.rolling(window=2).sum() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]]) + def test_groupby_rolling(self, expected_value, raw_value): + # GH 31754 + + def isnumpyarray(x): + return int(isinstance(x, np.ndarray)) + + df = DataFrame({"id": [1, 1, 1], "value": [1, 2, 3]}) + result = df.groupby("id").value.rolling(1).apply(isnumpyarray, raw=raw_value) + expected = Series( + [expected_value] * 3, + index=MultiIndex.from_tuples(((1, 0), (1, 1), (1, 2)), names=["id", None]), + name="value", + ) + tm.assert_series_equal(result, expected) + + def test_groupby_rolling_center_center(self): + # GH 35552 + series = Series(range(1, 6)) + result = series.groupby(series).rolling(center=True, window=3).mean() + expected = Series( + [np.nan] * 5, + index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))), + ) + tm.assert_series_equal(result, expected) + + series = Series(range(1, 5)) + result = series.groupby(series).rolling(center=True, window=3).mean() + expected = Series( + [np.nan] * 4, + index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))), + ) + tm.assert_series_equal(result, expected) + + df = 
DataFrame({"a": ["a"] * 5 + ["b"] * 6, "b": range(11)}) + result = df.groupby("a").rolling(center=True, window=3).mean() + expected = DataFrame( + [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan], + index=MultiIndex.from_tuples( + ( + ("a", 0), + ("a", 1), + ("a", 2), + ("a", 3), + ("a", 4), + ("b", 5), + ("b", 6), + ("b", 7), + ("b", 8), + ("b", 9), + ("b", 10), + ), + names=["a", None], + ), + columns=["b"], + ) + tm.assert_frame_equal(result, expected) + + df = DataFrame({"a": ["a"] * 5 + ["b"] * 5, "b": range(10)}) + result = df.groupby("a").rolling(center=True, window=3).mean() + expected = DataFrame( + [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan], + index=MultiIndex.from_tuples( + ( + ("a", 0), + ("a", 1), + ("a", 2), + ("a", 3), + ("a", 4), + ("b", 5), + ("b", 6), + ("b", 7), + ("b", 8), + ("b", 9), + ), + names=["a", None], + ), + columns=["b"], + ) + tm.assert_frame_equal(result, expected) + + def test_groupby_rolling_center_on(self): + # GH 37141 + df = DataFrame( + data={ + "Date": date_range("2020-01-01", "2020-01-10"), + "gb": ["group_1"] * 6 + ["group_2"] * 4, + "value": range(10), + } + ) + result = ( + df.groupby("gb") + .rolling(6, on="Date", center=True, min_periods=1) + .value.mean() + ) + mi = MultiIndex.from_arrays([df["gb"], df["Date"]], names=["gb", "Date"]) + expected = Series( + [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 7.0, 7.5, 7.5, 7.5], + name="value", + index=mi, + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("min_periods", [5, 4, 3]) + def test_groupby_rolling_center_min_periods(self, min_periods): + # GH 36040 + df = DataFrame({"group": ["A"] * 10 + ["B"] * 10, "data": range(20)}) + + window_size = 5 + result = ( + df.groupby("group") + .rolling(window_size, center=True, min_periods=min_periods) + .mean() + ) + result = result.reset_index()[["group", "data"]] + + grp_A_mean = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.5, 8.0] + grp_B_mean = [x + 10.0 for x in grp_A_mean] + + num_nans = max(0, min_periods 
- 3) # For window_size of 5 + nans = [np.nan] * num_nans + grp_A_expected = nans + grp_A_mean[num_nans : 10 - num_nans] + nans + grp_B_expected = nans + grp_B_mean[num_nans : 10 - num_nans] + nans + + expected = DataFrame( + {"group": ["A"] * 10 + ["B"] * 10, "data": grp_A_expected + grp_B_expected} + ) + + tm.assert_frame_equal(result, expected) + + def test_groupby_subselect_rolling(self): + # GH 35486 + df = DataFrame( + {"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [10, 20, 30, 20]} + ) + result = df.groupby("a")[["b"]].rolling(2).max() + expected = DataFrame( + [np.nan, np.nan, 2.0, np.nan], + columns=["b"], + index=MultiIndex.from_tuples( + ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None] + ), + ) + tm.assert_frame_equal(result, expected) + + result = df.groupby("a")["b"].rolling(2).max() + expected = Series( + [np.nan, np.nan, 2.0, np.nan], + index=MultiIndex.from_tuples( + ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None] + ), + name="b", + ) + tm.assert_series_equal(result, expected) + + def test_groupby_rolling_custom_indexer(self): + # GH 35557 + class SimpleIndexer(BaseIndexer): + def get_window_bounds( + self, + num_values=0, + min_periods=None, + center=None, + closed=None, + step=None, + ): + min_periods = self.window_size if min_periods is None else 0 + end = np.arange(num_values, dtype=np.int64) + 1 + start = end.copy() - self.window_size + start[start < 0] = min_periods + return start, end + + df = DataFrame( + {"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5 + ) + result = ( + df.groupby(df.index) + .rolling(SimpleIndexer(window_size=3), min_periods=1) + .sum() + ) + expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + def test_groupby_rolling_subset_with_closed(self): + # GH 35549 + df = DataFrame( + { + "column1": range(8), + "column2": range(8), + "group": ["A"] * 4 + ["B"] * 4, + "date": [ + Timestamp(date) + for date in ["2019-01-01", 
"2019-01-01", "2019-01-02", "2019-01-02"] + ] + * 2, + } + ) + result = ( + df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum() + ) + expected = Series( + [np.nan, np.nan, 1.0, 1.0, np.nan, np.nan, 9.0, 9.0], + index=MultiIndex.from_frame( + df[["group", "date"]], + names=["group", "date"], + ), + name="column1", + ) + tm.assert_series_equal(result, expected) + + def test_groupby_subset_rolling_subset_with_closed(self): + # GH 35549 + df = DataFrame( + { + "column1": range(8), + "column2": range(8), + "group": ["A"] * 4 + ["B"] * 4, + "date": [ + Timestamp(date) + for date in ["2019-01-01", "2019-01-01", "2019-01-02", "2019-01-02"] + ] + * 2, + } + ) + + result = ( + df.groupby("group")[["column1", "date"]] + .rolling("1D", on="date", closed="left")["column1"] + .sum() + ) + expected = Series( + [np.nan, np.nan, 1.0, 1.0, np.nan, np.nan, 9.0, 9.0], + index=MultiIndex.from_frame( + df[["group", "date"]], + names=["group", "date"], + ), + name="column1", + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("func", ["max", "min"]) + def test_groupby_rolling_index_changed(self, func): + # GH: #36018 nlevels of MultiIndex changed + ds = Series( + [1, 2, 2], + index=MultiIndex.from_tuples( + [("a", "x"), ("a", "y"), ("c", "z")], names=["1", "2"] + ), + name="a", + ) + + result = getattr(ds.groupby(ds).rolling(2), func)() + expected = Series( + [np.nan, np.nan, 2.0], + index=MultiIndex.from_tuples( + [(1, "a", "x"), (2, "a", "y"), (2, "c", "z")], names=["a", "1", "2"] + ), + name="a", + ) + tm.assert_series_equal(result, expected) + + def test_groupby_rolling_empty_frame(self): + # GH 36197 + expected = DataFrame({"s1": []}) + result = expected.groupby("s1").rolling(window=1).sum() + # GH 32262 + expected = expected.drop(columns="s1") + # GH-38057 from_tuples gives empty object dtype, we now get float/int levels + # expected.index = MultiIndex.from_tuples([], names=["s1", None]) + expected.index = MultiIndex.from_product( + 
[Index([], dtype="float64"), Index([], dtype="int64")], names=["s1", None] + ) + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"s1": [], "s2": []}) + result = expected.groupby(["s1", "s2"]).rolling(window=1).sum() + # GH 32262 + expected = expected.drop(columns=["s1", "s2"]) + expected.index = MultiIndex.from_product( + [ + Index([], dtype="float64"), + Index([], dtype="float64"), + Index([], dtype="int64"), + ], + names=["s1", "s2", None], + ) + tm.assert_frame_equal(result, expected) + + def test_groupby_rolling_string_index(self): + # GH: 36727 + df = DataFrame( + [ + ["A", "group_1", Timestamp(2019, 1, 1, 9)], + ["B", "group_1", Timestamp(2019, 1, 2, 9)], + ["Z", "group_2", Timestamp(2019, 1, 3, 9)], + ["H", "group_1", Timestamp(2019, 1, 6, 9)], + ["E", "group_2", Timestamp(2019, 1, 20, 9)], + ], + columns=["index", "group", "eventTime"], + ).set_index("index") + + groups = df.groupby("group") + df["count_to_date"] = groups.cumcount() + rolling_groups = groups.rolling("10d", on="eventTime") + result = rolling_groups.apply(lambda df: df.shape[0]) + expected = DataFrame( + [ + ["A", "group_1", Timestamp(2019, 1, 1, 9), 1.0], + ["B", "group_1", Timestamp(2019, 1, 2, 9), 2.0], + ["H", "group_1", Timestamp(2019, 1, 6, 9), 3.0], + ["Z", "group_2", Timestamp(2019, 1, 3, 9), 1.0], + ["E", "group_2", Timestamp(2019, 1, 20, 9), 1.0], + ], + columns=["index", "group", "eventTime", "count_to_date"], + ).set_index(["group", "index"]) + tm.assert_frame_equal(result, expected) + + def test_groupby_rolling_no_sort(self): + # GH 36889 + result = ( + DataFrame({"foo": [2, 1], "bar": [2, 1]}) + .groupby("foo", sort=False) + .rolling(1) + .min() + ) + expected = DataFrame( + np.array([[2.0, 2.0], [1.0, 1.0]]), + columns=["foo", "bar"], + index=MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]), + ) + # GH 32262 + expected = expected.drop(columns="foo") + tm.assert_frame_equal(result, expected) + + def test_groupby_rolling_count_closed_on(self, unit): 
+ # GH 35869 + df = DataFrame( + { + "column1": range(6), + "column2": range(6), + "group": 3 * ["A", "B"], + "date": date_range(end="20190101", periods=6, unit=unit), + } + ) + result = ( + df.groupby("group") + .rolling("3d", on="date", closed="left")["column1"] + .count() + ) + dti = DatetimeIndex( + [ + "2018-12-27", + "2018-12-29", + "2018-12-31", + "2018-12-28", + "2018-12-30", + "2019-01-01", + ], + dtype=f"M8[{unit}]", + ) + mi = MultiIndex.from_arrays( + [ + ["A", "A", "A", "B", "B", "B"], + dti, + ], + names=["group", "date"], + ) + expected = Series( + [np.nan, 1.0, 1.0, np.nan, 1.0, 1.0], + name="column1", + index=mi, + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + ("func", "kwargs"), + [("rolling", {"window": 2, "min_periods": 1}), ("expanding", {})], + ) + def test_groupby_rolling_sem(self, func, kwargs): + # GH: 26476 + df = DataFrame( + [["a", 1], ["a", 2], ["b", 1], ["b", 2], ["b", 3]], columns=["a", "b"] + ) + result = getattr(df.groupby("a"), func)(**kwargs).sem() + expected = DataFrame( + {"a": [np.nan] * 5, "b": [np.nan, 0.70711, np.nan, 0.70711, 0.70711]}, + index=MultiIndex.from_tuples( + [("a", 0), ("a", 1), ("b", 2), ("b", 3), ("b", 4)], names=["a", None] + ), + ) + # GH 32262 + expected = expected.drop(columns="a") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + ("rollings", "key"), [({"on": "a"}, "a"), ({"on": None}, "index")] + ) + def test_groupby_rolling_nans_in_index(self, rollings, key): + # GH: 34617 + df = DataFrame( + { + "a": to_datetime(["2020-06-01 12:00", "2020-06-01 14:00", np.nan]), + "b": [1, 2, 3], + "c": [1, 1, 1], + } + ) + if key == "index": + df = df.set_index("a") + with pytest.raises(ValueError, match=f"{key} values must not have NaT"): + df.groupby("c").rolling("60min", **rollings) + + @pytest.mark.parametrize("group_keys", [True, False]) + def test_groupby_rolling_group_keys(self, group_keys): + # GH 37641 + # GH 38523: GH 37641 actually was not a bug. 
+ # group_keys only applies to groupby.apply directly + arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]] + index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2")) + + s = Series([1, 2, 3], index=index) + result = s.groupby(["idx1", "idx2"], group_keys=group_keys).rolling(1).mean() + expected = Series( + [1.0, 2.0, 3.0], + index=MultiIndex.from_tuples( + [ + ("val1", "val1", "val1", "val1"), + ("val1", "val1", "val1", "val1"), + ("val2", "val2", "val2", "val2"), + ], + names=["idx1", "idx2", "idx1", "idx2"], + ), + ) + tm.assert_series_equal(result, expected) + + def test_groupby_rolling_index_level_and_column_label(self): + # The groupby keys should not appear as a resulting column + arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]] + index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2")) + + df = DataFrame({"A": [1, 1, 2], "B": range(3)}, index=index) + result = df.groupby(["idx1", "A"]).rolling(1).mean() + expected = DataFrame( + {"B": [0.0, 1.0, 2.0]}, + index=MultiIndex.from_tuples( + [ + ("val1", 1, "val1", "val1"), + ("val1", 1, "val1", "val1"), + ("val2", 2, "val2", "val2"), + ], + names=["idx1", "A", "idx1", "idx2"], + ), + ) + tm.assert_frame_equal(result, expected) + + def test_groupby_rolling_resulting_multiindex(self): + # a few different cases checking the created MultiIndex of the result + # https://github.com/pandas-dev/pandas/pull/38057 + + # grouping by 1 columns -> 2-level MI as result + df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4}) + result = df.groupby("b").rolling(3).mean() + expected_index = MultiIndex.from_tuples( + [(1, 0), (1, 2), (1, 4), (1, 6), (2, 1), (2, 3), (2, 5), (2, 7)], + names=["b", None], + ) + tm.assert_index_equal(result.index, expected_index) + + def test_groupby_rolling_resulting_multiindex2(self): + # grouping by 2 columns -> 3-level MI as result + df = DataFrame({"a": np.arange(12.0), "b": [1, 2] * 6, "c": [1, 2, 3, 4] * 3}) + result = df.groupby(["b", "c"]).rolling(2).sum() 
+ expected_index = MultiIndex.from_tuples( + [ + (1, 1, 0), + (1, 1, 4), + (1, 1, 8), + (1, 3, 2), + (1, 3, 6), + (1, 3, 10), + (2, 2, 1), + (2, 2, 5), + (2, 2, 9), + (2, 4, 3), + (2, 4, 7), + (2, 4, 11), + ], + names=["b", "c", None], + ) + tm.assert_index_equal(result.index, expected_index) + + def test_groupby_rolling_resulting_multiindex3(self): + # grouping with 1 level on dataframe with 2-level MI -> 3-level MI as result + df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4, "c": [1, 2, 3, 4] * 2}) + df = df.set_index("c", append=True) + result = df.groupby("b").rolling(3).mean() + expected_index = MultiIndex.from_tuples( + [ + (1, 0, 1), + (1, 2, 3), + (1, 4, 1), + (1, 6, 3), + (2, 1, 2), + (2, 3, 4), + (2, 5, 2), + (2, 7, 4), + ], + names=["b", None, "c"], + ) + tm.assert_index_equal(result.index, expected_index, exact="equiv") + + def test_groupby_rolling_object_doesnt_affect_groupby_apply(self, roll_frame): + # GH 39732 + g = roll_frame.groupby("A", group_keys=False) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(lambda x: x.rolling(4).sum()).index + _ = g.rolling(window=4) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + result = g.apply(lambda x: x.rolling(4).sum()).index + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + ("window", "min_periods", "closed", "expected"), + [ + (2, 0, "left", [None, 0.0, 1.0, 1.0, None, 0.0, 1.0, 1.0]), + (2, 2, "left", [None, None, 1.0, 1.0, None, None, 1.0, 1.0]), + (4, 4, "left", [None, None, None, None, None, None, None, None]), + (4, 4, "right", [None, None, None, 5.0, None, None, None, 5.0]), + ], + ) + def test_groupby_rolling_var(self, window, min_periods, closed, expected): + df = DataFrame([1, 2, 3, 4, 5, 6, 7, 8]) + result = ( + df.groupby([1, 2, 1, 2, 1, 2, 1, 2]) + .rolling(window=window, 
min_periods=min_periods, closed=closed) + .var(0) + ) + expected_result = DataFrame( + np.array(expected, dtype="float64"), + index=MultiIndex( + levels=[np.array([1, 2]), [0, 1, 2, 3, 4, 5, 6, 7]], + codes=[[0, 0, 0, 0, 1, 1, 1, 1], [0, 2, 4, 6, 1, 3, 5, 7]], + ), + ) + tm.assert_frame_equal(result, expected_result) + + @pytest.mark.parametrize( + "columns", [MultiIndex.from_tuples([("A", ""), ("B", "C")]), ["A", "B"]] + ) + def test_by_column_not_in_values(self, columns): + # GH 32262 + df = DataFrame([[1, 0]] * 20 + [[2, 0]] * 12 + [[3, 0]] * 8, columns=columns) + g = df.groupby("A") + original_obj = g.obj.copy(deep=True) + r = g.rolling(4) + result = r.sum() + assert "A" not in result.columns + tm.assert_frame_equal(g.obj, original_obj) + + def test_groupby_level(self): + # GH 38523, 38787 + arrays = [ + ["Falcon", "Falcon", "Parrot", "Parrot"], + ["Captive", "Wild", "Captive", "Wild"], + ] + index = MultiIndex.from_arrays(arrays, names=("Animal", "Type")) + df = DataFrame({"Max Speed": [390.0, 350.0, 30.0, 20.0]}, index=index) + result = df.groupby(level=0)["Max Speed"].rolling(2).sum() + expected = Series( + [np.nan, 740.0, np.nan, 50.0], + index=MultiIndex.from_tuples( + [ + ("Falcon", "Falcon", "Captive"), + ("Falcon", "Falcon", "Wild"), + ("Parrot", "Parrot", "Captive"), + ("Parrot", "Parrot", "Wild"), + ], + names=["Animal", "Animal", "Type"], + ), + name="Max Speed", + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "by, expected_data", + [ + [["id"], {"num": [100.0, 150.0, 150.0, 200.0]}], + [ + ["id", "index"], + { + "date": [ + Timestamp("2018-01-01"), + Timestamp("2018-01-02"), + Timestamp("2018-01-01"), + Timestamp("2018-01-02"), + ], + "num": [100.0, 200.0, 150.0, 250.0], + }, + ], + ], + ) + def test_as_index_false(self, by, expected_data, unit): + # GH 39433 + data = [ + ["A", "2018-01-01", 100.0], + ["A", "2018-01-02", 200.0], + ["B", "2018-01-01", 150.0], + ["B", "2018-01-02", 250.0], + ] + df = DataFrame(data, 
columns=["id", "date", "num"]) + df["date"] = df["date"].astype(f"M8[{unit}]") + df = df.set_index(["date"]) + + gp_by = [getattr(df, attr) for attr in by] + result = ( + df.groupby(gp_by, as_index=False).rolling(window=2, min_periods=1).mean() + ) + + expected = {"id": ["A", "A", "B", "B"]} + expected.update(expected_data) + expected = DataFrame( + expected, + index=df.index, + ) + if "date" in expected_data: + expected["date"] = expected["date"].astype(f"M8[{unit}]") + tm.assert_frame_equal(result, expected) + + def test_nan_and_zero_endpoints(self, any_int_numpy_dtype): + # https://github.com/twosigma/pandas/issues/53 + typ = np.dtype(any_int_numpy_dtype).type + size = 1000 + idx = np.repeat(typ(0), size) + idx[-1] = 1 + + val = 5e25 + arr = np.repeat(val, size) + arr[0] = np.nan + arr[-1] = 0 + + df = DataFrame( + { + "index": idx, + "adl2": arr, + } + ).set_index("index") + result = df.groupby("index")["adl2"].rolling(window=10, min_periods=1).mean() + expected = Series( + arr, + name="adl2", + index=MultiIndex.from_arrays( + [ + Index([0] * 999 + [1], dtype=typ, name="index"), + Index([0] * 999 + [1], dtype=typ, name="index"), + ], + ), + ) + tm.assert_series_equal(result, expected) + + def test_groupby_rolling_non_monotonic(self): + # GH 43909 + + shuffled = [3, 0, 1, 2] + sec = 1_000 + df = DataFrame( + [{"t": Timestamp(2 * x * sec), "x": x + 1, "c": 42} for x in shuffled] + ) + with pytest.raises(ValueError, match=r".* must be monotonic"): + df.groupby("c").rolling(on="t", window="3s") + + def test_groupby_monotonic(self): + # GH 15130 + # we don't need to validate monotonicity when grouping + + # GH 43909 we should raise an error here to match + # behaviour of non-groupby rolling. 
+ + data = [ + ["David", "1/1/2015", 100], + ["David", "1/5/2015", 500], + ["David", "5/30/2015", 50], + ["David", "7/25/2015", 50], + ["Ryan", "1/4/2014", 100], + ["Ryan", "1/19/2015", 500], + ["Ryan", "3/31/2016", 50], + ["Joe", "7/1/2015", 100], + ["Joe", "9/9/2015", 500], + ["Joe", "10/15/2015", 50], + ] + + df = DataFrame(data=data, columns=["name", "date", "amount"]) + df["date"] = to_datetime(df["date"]) + df = df.sort_values("date") + + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = ( + df.set_index("date") + .groupby("name") + .apply(lambda x: x.rolling("180D")["amount"].sum()) + ) + result = df.groupby("name").rolling("180D", on="date")["amount"].sum() + tm.assert_series_equal(result, expected) + + def test_datelike_on_monotonic_within_each_group(self): + # GH 13966 (similar to #15130, closed by #15175) + + # superseded by 43909 + # GH 46061: OK if the on is monotonic relative to each each group + + dates = date_range(start="2016-01-01 09:30:00", periods=20, freq="s") + df = DataFrame( + { + "A": [1] * 20 + [2] * 12 + [3] * 8, + "B": np.concatenate((dates, dates)), + "C": np.arange(40), + } + ) + + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = ( + df.set_index("B") + .groupby("A") + .apply(lambda x: x.rolling("4s")["C"].mean()) + ) + result = df.groupby("A").rolling("4s", on="B").C.mean() + tm.assert_series_equal(result, expected) + + def test_datelike_on_not_monotonic_within_each_group(self): + # GH 46061 + df = DataFrame( + { + "A": [1] * 3 + [2] * 3, + "B": [Timestamp(year, 1, 1) for year in [2020, 2021, 2019]] * 2, + "C": range(6), + } + ) + with pytest.raises(ValueError, match="Each group within B must be monotonic."): + df.groupby("A").rolling("365D", on="B") + + +class TestExpanding: + @pytest.fixture + def frame(self): + return DataFrame({"A": [1] * 20 + [2] 
* 12 + [3] * 8, "B": np.arange(40)}) + + @pytest.mark.parametrize( + "f", ["sum", "mean", "min", "max", "count", "kurt", "skew"] + ) + def test_expanding(self, f, frame): + g = frame.groupby("A", group_keys=False) + r = g.expanding() + + result = getattr(r, f)() + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(lambda x: getattr(x.expanding(), f)()) + # groupby.apply doesn't drop the grouped-by column + expected = expected.drop("A", axis=1) + # GH 39732 + expected_index = MultiIndex.from_arrays([frame["A"], range(40)]) + expected.index = expected_index + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("f", ["std", "var"]) + def test_expanding_ddof(self, f, frame): + g = frame.groupby("A", group_keys=False) + r = g.expanding() + + result = getattr(r, f)(ddof=0) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0)) + # groupby.apply doesn't drop the grouped-by column + expected = expected.drop("A", axis=1) + # GH 39732 + expected_index = MultiIndex.from_arrays([frame["A"], range(40)]) + expected.index = expected_index + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"] + ) + def test_expanding_quantile(self, interpolation, frame): + g = frame.groupby("A", group_keys=False) + r = g.expanding() + + result = r.quantile(0.4, interpolation=interpolation) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply( + lambda x: x.expanding().quantile(0.4, interpolation=interpolation) + ) + # groupby.apply doesn't drop the grouped-by column + expected = expected.drop("A", axis=1) + # GH 39732 + expected_index = 
MultiIndex.from_arrays([frame["A"], range(40)]) + expected.index = expected_index + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("f", ["corr", "cov"]) + def test_expanding_corr_cov(self, f, frame): + g = frame.groupby("A") + r = g.expanding() + + result = getattr(r, f)(frame) + + def func_0(x): + return getattr(x.expanding(), f)(frame) + + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(func_0) + # GH 39591: groupby.apply returns 1 instead of nan for windows + # with all nan values + null_idx = list(range(20, 61)) + list(range(72, 113)) + expected.iloc[null_idx, 1] = np.nan + # GH 39591: The grouped column should be all np.nan + # (groupby.apply inserts 0s for cov) + expected["A"] = np.nan + tm.assert_frame_equal(result, expected) + + result = getattr(r.B, f)(pairwise=True) + + def func_1(x): + return getattr(x.B.expanding(), f)(pairwise=True) + + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply(func_1) + tm.assert_series_equal(result, expected) + + def test_expanding_apply(self, raw, frame): + g = frame.groupby("A", group_keys=False) + r = g.expanding() + + # reduction + result = r.apply(lambda x: x.sum(), raw=raw) + msg = "DataFrameGroupBy.apply operated on the grouping columns" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + expected = g.apply( + lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw) + ) + # groupby.apply doesn't drop the grouped-by column + expected = expected.drop("A", axis=1) + # GH 39732 + expected_index = MultiIndex.from_arrays([frame["A"], range(40)]) + expected.index = expected_index + tm.assert_frame_equal(result, expected) + + +class TestEWM: + @pytest.mark.parametrize( + "method, expected_data", + [ + ["mean", [0.0, 0.6666666666666666, 1.4285714285714286, 2.2666666666666666]], + ["std", 
[np.nan, 0.707107, 0.963624, 1.177164]], + ["var", [np.nan, 0.5, 0.9285714285714286, 1.3857142857142857]], + ], + ) + def test_methods(self, method, expected_data): + # GH 16037 + df = DataFrame({"A": ["a"] * 4, "B": range(4)}) + result = getattr(df.groupby("A").ewm(com=1.0), method)() + expected = DataFrame( + {"B": expected_data}, + index=MultiIndex.from_tuples( + [ + ("a", 0), + ("a", 1), + ("a", 2), + ("a", 3), + ], + names=["A", None], + ), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "method, expected_data", + [["corr", [np.nan, 1.0, 1.0, 1]], ["cov", [np.nan, 0.5, 0.928571, 1.385714]]], + ) + def test_pairwise_methods(self, method, expected_data): + # GH 16037 + df = DataFrame({"A": ["a"] * 4, "B": range(4)}) + result = getattr(df.groupby("A").ewm(com=1.0), method)() + expected = DataFrame( + {"B": expected_data}, + index=MultiIndex.from_tuples( + [ + ("a", 0, "B"), + ("a", 1, "B"), + ("a", 2, "B"), + ("a", 3, "B"), + ], + names=["A", None, None], + ), + ) + tm.assert_frame_equal(result, expected) + + expected = df.groupby("A")[["B"]].apply( + lambda x: getattr(x.ewm(com=1.0), method)() + ) + tm.assert_frame_equal(result, expected) + + def test_times(self, times_frame): + # GH 40951 + halflife = "23 days" + # GH#42738 + times = times_frame.pop("C") + result = times_frame.groupby("A").ewm(halflife=halflife, times=times).mean() + expected = DataFrame( + { + "B": [ + 0.0, + 0.507534, + 1.020088, + 1.537661, + 0.0, + 0.567395, + 1.221209, + 0.0, + 0.653141, + 1.195003, + ] + }, + index=MultiIndex.from_tuples( + [ + ("a", 0), + ("a", 3), + ("a", 6), + ("a", 9), + ("b", 1), + ("b", 4), + ("b", 7), + ("c", 2), + ("c", 5), + ("c", 8), + ], + names=["A", None], + ), + ) + tm.assert_frame_equal(result, expected) + + def test_times_array(self, times_frame): + # GH 40951 + halflife = "23 days" + times = times_frame.pop("C") + gb = times_frame.groupby("A") + result = gb.ewm(halflife=halflife, times=times).mean() + expected = 
gb.ewm(halflife=halflife, times=times.values).mean() + tm.assert_frame_equal(result, expected) + + def test_dont_mutate_obj_after_slicing(self): + # GH 43355 + df = DataFrame( + { + "id": ["a", "a", "b", "b", "b"], + "timestamp": date_range("2021-9-1", periods=5, freq="h"), + "y": range(5), + } + ) + grp = df.groupby("id").rolling("1h", on="timestamp") + result = grp.count() + expected_df = DataFrame( + { + "timestamp": date_range("2021-9-1", periods=5, freq="h"), + "y": [1.0] * 5, + }, + index=MultiIndex.from_arrays( + [["a", "a", "b", "b", "b"], list(range(5))], names=["id", None] + ), + ) + tm.assert_frame_equal(result, expected_df) + + result = grp["y"].count() + expected_series = Series( + [1.0] * 5, + index=MultiIndex.from_arrays( + [ + ["a", "a", "b", "b", "b"], + date_range("2021-9-1", periods=5, freq="h"), + ], + names=["id", "timestamp"], + ), + name="y", + ) + tm.assert_series_equal(result, expected_series) + # This is the key test + result = grp.count() + tm.assert_frame_equal(result, expected_df) + + +def test_rolling_corr_with_single_integer_in_index(): + # GH 44078 + df = DataFrame({"a": [(1,), (1,), (1,)], "b": [4, 5, 6]}) + gb = df.groupby(["a"]) + result = gb.rolling(2).corr(other=df) + index = MultiIndex.from_tuples([((1,), 0), ((1,), 1), ((1,), 2)], names=["a", None]) + expected = DataFrame( + {"a": [np.nan, np.nan, np.nan], "b": [np.nan, 1.0, 1.0]}, index=index + ) + tm.assert_frame_equal(result, expected) + + +def test_rolling_corr_with_tuples_in_index(): + # GH 44078 + df = DataFrame( + { + "a": [ + ( + 1, + 2, + ), + ( + 1, + 2, + ), + ( + 1, + 2, + ), + ], + "b": [4, 5, 6], + } + ) + gb = df.groupby(["a"]) + result = gb.rolling(2).corr(other=df) + index = MultiIndex.from_tuples( + [((1, 2), 0), ((1, 2), 1), ((1, 2), 2)], names=["a", None] + ) + expected = DataFrame( + {"a": [np.nan, np.nan, np.nan], "b": [np.nan, 1.0, 1.0]}, index=index + ) + tm.assert_frame_equal(result, expected) diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/window/test_numba.py b/venv/lib/python3.10/site-packages/pandas/tests/window/test_numba.py new file mode 100644 index 0000000000000000000000000000000000000000..139e1ff7f65fda615ab149e29b8369e2001050e5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_numba.py @@ -0,0 +1,455 @@ +import numpy as np +import pytest + +from pandas.errors import NumbaUtilError +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + Series, + option_context, + to_datetime, +) +import pandas._testing as tm + +pytestmark = pytest.mark.single_cpu + + +@pytest.fixture(params=["single", "table"]) +def method(request): + """method keyword in rolling/expanding/ewm constructor""" + return request.param + + +@pytest.fixture( + params=[ + ["sum", {}], + ["mean", {}], + ["median", {}], + ["max", {}], + ["min", {}], + ["var", {}], + ["var", {"ddof": 0}], + ["std", {}], + ["std", {"ddof": 0}], + ] +) +def arithmetic_numba_supported_operators(request): + return request.param + + +@td.skip_if_no("numba") +@pytest.mark.filterwarnings("ignore") +# Filter warnings when parallel=True and the function can't be parallelized by Numba +class TestEngine: + @pytest.mark.parametrize("jit", [True, False]) + def test_numba_vs_cython_apply(self, jit, nogil, parallel, nopython, center, step): + def f(x, *args): + arg_sum = 0 + for arg in args: + arg_sum += arg + return np.mean(x) + arg_sum + + if jit: + import numba + + f = numba.jit(f) + + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + args = (2,) + + s = Series(range(10)) + result = s.rolling(2, center=center, step=step).apply( + f, args=args, engine="numba", engine_kwargs=engine_kwargs, raw=True + ) + expected = s.rolling(2, center=center, step=step).apply( + f, engine="cython", args=args, raw=True + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data", + [ + DataFrame(np.eye(5)), + DataFrame( + [ + 
[5, 7, 7, 7, np.nan, np.inf, 4, 3, 3, 3], + [5, 7, 7, 7, np.nan, np.inf, 7, 3, 3, 3], + [np.nan, np.nan, 5, 6, 7, 5, 5, 5, 5, 5], + ] + ).T, + Series(range(5), name="foo"), + Series([20, 10, 10, np.inf, 1, 1, 2, 3]), + Series([20, 10, 10, np.nan, 10, 1, 2, 3]), + ], + ) + def test_numba_vs_cython_rolling_methods( + self, + data, + nogil, + parallel, + nopython, + arithmetic_numba_supported_operators, + step, + ): + method, kwargs = arithmetic_numba_supported_operators + + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + + roll = data.rolling(3, step=step) + result = getattr(roll, method)( + engine="numba", engine_kwargs=engine_kwargs, **kwargs + ) + expected = getattr(roll, method)(engine="cython", **kwargs) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "data", [DataFrame(np.eye(5)), Series(range(5), name="foo")] + ) + def test_numba_vs_cython_expanding_methods( + self, data, nogil, parallel, nopython, arithmetic_numba_supported_operators + ): + method, kwargs = arithmetic_numba_supported_operators + + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + + data = DataFrame(np.eye(5)) + expand = data.expanding() + result = getattr(expand, method)( + engine="numba", engine_kwargs=engine_kwargs, **kwargs + ) + expected = getattr(expand, method)(engine="cython", **kwargs) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("jit", [True, False]) + def test_cache_apply(self, jit, nogil, parallel, nopython, step): + # Test that the functions are cached correctly if we switch functions + def func_1(x): + return np.mean(x) + 4 + + def func_2(x): + return np.std(x) * 5 + + if jit: + import numba + + func_1 = numba.jit(func_1) + func_2 = numba.jit(func_2) + + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + + roll = Series(range(10)).rolling(2, step=step) + result = roll.apply( + func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True + ) + expected = 
roll.apply(func_1, engine="cython", raw=True) + tm.assert_series_equal(result, expected) + + result = roll.apply( + func_2, engine="numba", engine_kwargs=engine_kwargs, raw=True + ) + expected = roll.apply(func_2, engine="cython", raw=True) + tm.assert_series_equal(result, expected) + # This run should use the cached func_1 + result = roll.apply( + func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True + ) + expected = roll.apply(func_1, engine="cython", raw=True) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "window,window_kwargs", + [ + ["rolling", {"window": 3, "min_periods": 0}], + ["expanding", {}], + ], + ) + def test_dont_cache_args( + self, window, window_kwargs, nogil, parallel, nopython, method + ): + # GH 42287 + + def add(values, x): + return np.sum(values) + x + + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + df = DataFrame({"value": [0, 0, 0]}) + result = getattr(df, window)(method=method, **window_kwargs).apply( + add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(1,) + ) + expected = DataFrame({"value": [1.0, 1.0, 1.0]}) + tm.assert_frame_equal(result, expected) + + result = getattr(df, window)(method=method, **window_kwargs).apply( + add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(2,) + ) + expected = DataFrame({"value": [2.0, 2.0, 2.0]}) + tm.assert_frame_equal(result, expected) + + def test_dont_cache_engine_kwargs(self): + # If the user passes a different set of engine_kwargs don't return the same + # jitted function + nogil = False + parallel = True + nopython = True + + def func(x): + return nogil + parallel + nopython + + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + df = DataFrame({"value": [0, 0, 0]}) + result = df.rolling(1).apply( + func, raw=True, engine="numba", engine_kwargs=engine_kwargs + ) + expected = DataFrame({"value": [2.0, 2.0, 2.0]}) + tm.assert_frame_equal(result, expected) + + parallel = 
False + engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel} + result = df.rolling(1).apply( + func, raw=True, engine="numba", engine_kwargs=engine_kwargs + ) + expected = DataFrame({"value": [1.0, 1.0, 1.0]}) + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no("numba") +class TestEWM: + @pytest.mark.parametrize( + "grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"] + ) + @pytest.mark.parametrize("method", ["mean", "sum"]) + def test_invalid_engine(self, grouper, method): + df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)}) + with pytest.raises(ValueError, match="engine must be either"): + getattr(grouper(df).ewm(com=1.0), method)(engine="foo") + + @pytest.mark.parametrize( + "grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"] + ) + @pytest.mark.parametrize("method", ["mean", "sum"]) + def test_invalid_engine_kwargs(self, grouper, method): + df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)}) + with pytest.raises(ValueError, match="cython engine does not"): + getattr(grouper(df).ewm(com=1.0), method)( + engine="cython", engine_kwargs={"nopython": True} + ) + + @pytest.mark.parametrize("grouper", ["None", "groupby"]) + @pytest.mark.parametrize("method", ["mean", "sum"]) + def test_cython_vs_numba( + self, grouper, method, nogil, parallel, nopython, ignore_na, adjust + ): + df = DataFrame({"B": range(4)}) + if grouper == "None": + grouper = lambda x: x + else: + df["A"] = ["a", "b", "a", "b"] + grouper = lambda x: x.groupby("A") + if method == "sum": + adjust = True + ewm = grouper(df).ewm(com=1.0, adjust=adjust, ignore_na=ignore_na) + + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + result = getattr(ewm, method)(engine="numba", engine_kwargs=engine_kwargs) + expected = getattr(ewm, method)(engine="cython") + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("grouper", ["None", "groupby"]) + def 
test_cython_vs_numba_times(self, grouper, nogil, parallel, nopython, ignore_na): + # GH 40951 + + df = DataFrame({"B": [0, 0, 1, 1, 2, 2]}) + if grouper == "None": + grouper = lambda x: x + else: + grouper = lambda x: x.groupby("A") + df["A"] = ["a", "b", "a", "b", "b", "a"] + + halflife = "23 days" + times = to_datetime( + [ + "2020-01-01", + "2020-01-01", + "2020-01-02", + "2020-01-10", + "2020-02-23", + "2020-01-03", + ] + ) + ewm = grouper(df).ewm( + halflife=halflife, adjust=True, ignore_na=ignore_na, times=times + ) + + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + + result = ewm.mean(engine="numba", engine_kwargs=engine_kwargs) + expected = ewm.mean(engine="cython") + + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no("numba") +def test_use_global_config(): + def f(x): + return np.mean(x) + 2 + + s = Series(range(10)) + with option_context("compute.use_numba", True): + result = s.rolling(2).apply(f, engine=None, raw=True) + expected = s.rolling(2).apply(f, engine="numba", raw=True) + tm.assert_series_equal(expected, result) + + +@td.skip_if_no("numba") +def test_invalid_kwargs_nopython(): + with pytest.raises(NumbaUtilError, match="numba does not support kwargs with"): + Series(range(1)).rolling(1).apply( + lambda x: x, kwargs={"a": 1}, engine="numba", raw=True + ) + + +@td.skip_if_no("numba") +@pytest.mark.slow +@pytest.mark.filterwarnings("ignore") +# Filter warnings when parallel=True and the function can't be parallelized by Numba +class TestTableMethod: + def test_table_series_valueerror(self): + def f(x): + return np.sum(x, axis=0) + 1 + + with pytest.raises( + ValueError, match="method='table' not applicable for Series objects." 
+ ): + Series(range(1)).rolling(1, method="table").apply( + f, engine="numba", raw=True + ) + + def test_table_method_rolling_methods( + self, + axis, + nogil, + parallel, + nopython, + arithmetic_numba_supported_operators, + step, + ): + method, kwargs = arithmetic_numba_supported_operators + + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + + df = DataFrame(np.eye(3)) + roll_table = df.rolling(2, method="table", axis=axis, min_periods=0, step=step) + if method in ("var", "std"): + with pytest.raises(NotImplementedError, match=f"{method} not supported"): + getattr(roll_table, method)( + engine_kwargs=engine_kwargs, engine="numba", **kwargs + ) + else: + roll_single = df.rolling( + 2, method="single", axis=axis, min_periods=0, step=step + ) + result = getattr(roll_table, method)( + engine_kwargs=engine_kwargs, engine="numba", **kwargs + ) + expected = getattr(roll_single, method)( + engine_kwargs=engine_kwargs, engine="numba", **kwargs + ) + tm.assert_frame_equal(result, expected) + + def test_table_method_rolling_apply(self, axis, nogil, parallel, nopython, step): + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + + def f(x): + return np.sum(x, axis=0) + 1 + + df = DataFrame(np.eye(3)) + result = df.rolling( + 2, method="table", axis=axis, min_periods=0, step=step + ).apply(f, raw=True, engine_kwargs=engine_kwargs, engine="numba") + expected = df.rolling( + 2, method="single", axis=axis, min_periods=0, step=step + ).apply(f, raw=True, engine_kwargs=engine_kwargs, engine="numba") + tm.assert_frame_equal(result, expected) + + def test_table_method_rolling_weighted_mean(self, step): + def weighted_mean(x): + arr = np.ones((1, x.shape[1])) + arr[:, :2] = (x[:, :2] * x[:, 2]).sum(axis=0) / x[:, 2].sum() + return arr + + df = DataFrame([[1, 2, 0.6], [2, 3, 0.4], [3, 4, 0.2], [4, 5, 0.7]]) + result = df.rolling(2, method="table", min_periods=0, step=step).apply( + weighted_mean, raw=True, engine="numba" + ) + 
expected = DataFrame( + [ + [1.0, 2.0, 1.0], + [1.8, 2.0, 1.0], + [3.333333, 2.333333, 1.0], + [1.555556, 7, 1.0], + ] + )[::step] + tm.assert_frame_equal(result, expected) + + def test_table_method_expanding_apply(self, axis, nogil, parallel, nopython): + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + + def f(x): + return np.sum(x, axis=0) + 1 + + df = DataFrame(np.eye(3)) + result = df.expanding(method="table", axis=axis).apply( + f, raw=True, engine_kwargs=engine_kwargs, engine="numba" + ) + expected = df.expanding(method="single", axis=axis).apply( + f, raw=True, engine_kwargs=engine_kwargs, engine="numba" + ) + tm.assert_frame_equal(result, expected) + + def test_table_method_expanding_methods( + self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators + ): + method, kwargs = arithmetic_numba_supported_operators + + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + + df = DataFrame(np.eye(3)) + expand_table = df.expanding(method="table", axis=axis) + if method in ("var", "std"): + with pytest.raises(NotImplementedError, match=f"{method} not supported"): + getattr(expand_table, method)( + engine_kwargs=engine_kwargs, engine="numba", **kwargs + ) + else: + expand_single = df.expanding(method="single", axis=axis) + result = getattr(expand_table, method)( + engine_kwargs=engine_kwargs, engine="numba", **kwargs + ) + expected = getattr(expand_single, method)( + engine_kwargs=engine_kwargs, engine="numba", **kwargs + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("data", [np.eye(3), np.ones((2, 3)), np.ones((3, 2))]) + @pytest.mark.parametrize("method", ["mean", "sum"]) + def test_table_method_ewm(self, data, method, axis, nogil, parallel, nopython): + engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython} + + df = DataFrame(data) + + result = getattr(df.ewm(com=1, method="table", axis=axis), method)( + engine_kwargs=engine_kwargs, 
engine="numba" + ) + expected = getattr(df.ewm(com=1, method="single", axis=axis), method)( + engine_kwargs=engine_kwargs, engine="numba" + ) + tm.assert_frame_equal(result, expected) + + +@td.skip_if_no("numba") +def test_npfunc_no_warnings(): + df = DataFrame({"col1": [1, 2, 3, 4, 5]}) + with tm.assert_produces_warning(False): + df.col1.rolling(2).apply(np.prod, raw=True, engine="numba") diff --git a/venv/lib/python3.10/site-packages/pandas/tests/window/test_pairwise.py b/venv/lib/python3.10/site-packages/pandas/tests/window/test_pairwise.py new file mode 100644 index 0000000000000000000000000000000000000000..3ceb58756bac6cbc5f3b1862523d4185c1ad8b47 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_pairwise.py @@ -0,0 +1,445 @@ +import numpy as np +import pytest + +from pandas.compat import IS64 + +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + date_range, +) +import pandas._testing as tm +from pandas.core.algorithms import safe_sort + + +@pytest.fixture( + params=[ + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", "C"]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1.0, 0]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0.0, 1]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", 1]), + DataFrame([[2.0, 4.0], [1.0, 2.0], [5.0, 2.0], [8.0, 1.0]], columns=[1, 0.0]), + DataFrame([[2, 4.0], [1, 2.0], [5, 2.0], [8, 1.0]], columns=[0, 1.0]), + DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.0]], columns=[1.0, "X"]), + ] +) +def pairwise_frames(request): + """Pairwise frames test_pairwise""" + return request.param + + +@pytest.fixture +def pairwise_target_frame(): + """Pairwise target frame for test_pairwise""" + return DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]) + + +@pytest.fixture +def pairwise_other_frame(): + """Pairwise other frame for 
test_pairwise""" + return DataFrame( + [[None, 1, 1], [None, 1, 2], [None, 3, 2], [None, 8, 1]], + columns=["Y", "Z", "X"], + ) + + +def test_rolling_cov(series): + A = series + B = A + np.random.default_rng(2).standard_normal(len(A)) + + result = A.rolling(window=50, min_periods=25).cov(B) + tm.assert_almost_equal(result.iloc[-1], np.cov(A[-50:], B[-50:])[0, 1]) + + +def test_rolling_corr(series): + A = series + B = A + np.random.default_rng(2).standard_normal(len(A)) + + result = A.rolling(window=50, min_periods=25).corr(B) + tm.assert_almost_equal(result.iloc[-1], np.corrcoef(A[-50:], B[-50:])[0, 1]) + + +def test_rolling_corr_bias_correction(): + # test for correct bias correction + a = Series( + np.arange(20, dtype=np.float64), index=date_range("2020-01-01", periods=20) + ) + b = a.copy() + a[:5] = np.nan + b[:10] = np.nan + + result = a.rolling(window=len(a), min_periods=1).corr(b) + tm.assert_almost_equal(result.iloc[-1], a.corr(b)) + + +@pytest.mark.parametrize("func", ["cov", "corr"]) +def test_rolling_pairwise_cov_corr(func, frame): + result = getattr(frame.rolling(window=10, min_periods=5), func)() + result = result.loc[(slice(None), 1), 5] + result.index = result.index.droplevel(1) + expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5]) + tm.assert_series_equal(result, expected, check_names=False) + + +@pytest.mark.parametrize("method", ["corr", "cov"]) +def test_flex_binary_frame(method, frame): + series = frame[1] + + res = getattr(series.rolling(window=10), method)(frame) + res2 = getattr(frame.rolling(window=10), method)(series) + exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x)) + + tm.assert_frame_equal(res, exp) + tm.assert_frame_equal(res2, exp) + + frame2 = frame.copy() + frame2 = DataFrame( + np.random.default_rng(2).standard_normal(frame2.shape), + index=frame2.index, + columns=frame2.columns, + ) + + res3 = getattr(frame.rolling(window=10), method)(frame2) + exp = DataFrame( + {k: 
getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame} + ) + tm.assert_frame_equal(res3, exp) + + +@pytest.mark.parametrize("window", range(7)) +def test_rolling_corr_with_zero_variance(window): + # GH 18430 + s = Series(np.zeros(20)) + other = Series(np.arange(20)) + + assert s.rolling(window=window).corr(other=other).isna().all() + + +def test_corr_sanity(): + # GH 3155 + df = DataFrame( + np.array( + [ + [0.87024726, 0.18505595], + [0.64355431, 0.3091617], + [0.92372966, 0.50552513], + [0.00203756, 0.04520709], + [0.84780328, 0.33394331], + [0.78369152, 0.63919667], + ] + ) + ) + + res = df[0].rolling(5, center=True).corr(df[1]) + assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res) + + df = DataFrame(np.random.default_rng(2).random((30, 2))) + res = df[0].rolling(5, center=True).corr(df[1]) + assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res) + + +def test_rolling_cov_diff_length(): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = s1.rolling(window=3, min_periods=2).cov(s2) + expected = Series([None, None, 2.0]) + tm.assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = s1.rolling(window=3, min_periods=2).cov(s2a) + tm.assert_series_equal(result, expected) + + +def test_rolling_corr_diff_length(): + # GH 7512 + s1 = Series([1, 2, 3], index=[0, 1, 2]) + s2 = Series([1, 3], index=[0, 2]) + result = s1.rolling(window=3, min_periods=2).corr(s2) + expected = Series([None, None, 1.0]) + tm.assert_series_equal(result, expected) + + s2a = Series([1, None, 3], index=[0, 1, 2]) + result = s1.rolling(window=3, min_periods=2).corr(s2a) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "f", + [ + lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), + lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), + ], +) +def test_rolling_functions_window_non_shrinkage_binary(f): + # corr/cov return a MI 
DataFrame + df = DataFrame( + [[1, 5], [3, 2], [3, 9], [-1, 0]], + columns=Index(["A", "B"], name="foo"), + index=Index(range(4), name="bar"), + ) + df_expected = DataFrame( + columns=Index(["A", "B"], name="foo"), + index=MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]), + dtype="float64", + ) + df_result = f(df) + tm.assert_frame_equal(df_result, df_expected) + + +@pytest.mark.parametrize( + "f", + [ + lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)), + lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)), + ], +) +def test_moment_functions_zero_length_pairwise(f): + df1 = DataFrame() + df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar")) + df2["a"] = df2["a"].astype("float64") + + df1_expected = DataFrame(index=MultiIndex.from_product([df1.index, df1.columns])) + df2_expected = DataFrame( + index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]), + columns=Index(["a"], name="foo"), + dtype="float64", + ) + + df1_result = f(df1) + tm.assert_frame_equal(df1_result, df1_expected) + + df2_result = f(df2) + tm.assert_frame_equal(df2_result, df2_expected) + + +class TestPairwise: + # GH 7738 + @pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()]) + def test_no_flex(self, pairwise_frames, pairwise_target_frame, f): + # DataFrame methods (which do not call flex_binary_moment()) + + result = f(pairwise_frames) + tm.assert_index_equal(result.index, pairwise_frames.columns) + tm.assert_index_equal(result.columns, pairwise_frames.columns) + expected = f(pairwise_target_frame) + # since we have sorted the results + # we can only compare non-nans + result = result.dropna().values + expected = expected.dropna().values + + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + @pytest.mark.parametrize( + "f", + [ + lambda x: x.expanding().cov(pairwise=True), + lambda x: x.expanding().corr(pairwise=True), + lambda x: 
x.rolling(window=3).cov(pairwise=True), + lambda x: x.rolling(window=3).corr(pairwise=True), + lambda x: x.ewm(com=3).cov(pairwise=True), + lambda x: x.ewm(com=3).corr(pairwise=True), + ], + ) + def test_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f): + # DataFrame with itself, pairwise=True + # note that we may construct the 1st level of the MI + # in a non-monotonic way, so compare accordingly + result = f(pairwise_frames) + tm.assert_index_equal( + result.index.levels[0], pairwise_frames.index, check_names=False + ) + tm.assert_index_equal( + safe_sort(result.index.levels[1]), + safe_sort(pairwise_frames.columns.unique()), + ) + tm.assert_index_equal(result.columns, pairwise_frames.columns) + expected = f(pairwise_target_frame) + # since we have sorted the results + # we can only compare non-nans + result = result.dropna().values + expected = expected.dropna().values + + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + @pytest.mark.parametrize( + "f", + [ + lambda x: x.expanding().cov(pairwise=False), + lambda x: x.expanding().corr(pairwise=False), + lambda x: x.rolling(window=3).cov(pairwise=False), + lambda x: x.rolling(window=3).corr(pairwise=False), + lambda x: x.ewm(com=3).cov(pairwise=False), + lambda x: x.ewm(com=3).corr(pairwise=False), + ], + ) + def test_no_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f): + # DataFrame with itself, pairwise=False + result = f(pairwise_frames) + tm.assert_index_equal(result.index, pairwise_frames.index) + tm.assert_index_equal(result.columns, pairwise_frames.columns) + expected = f(pairwise_target_frame) + # since we have sorted the results + # we can only compare non-nans + result = result.dropna().values + expected = expected.dropna().values + + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + @pytest.mark.parametrize( + "f", + [ + lambda x, y: x.expanding().cov(y, pairwise=True), + lambda x, y: x.expanding().corr(y, pairwise=True), + 
lambda x, y: x.rolling(window=3).cov(y, pairwise=True), + # TODO: We're missing a flag somewhere in meson + pytest.param( + lambda x, y: x.rolling(window=3).corr(y, pairwise=True), + marks=pytest.mark.xfail( + not IS64, reason="Precision issues on 32 bit", strict=False + ), + ), + lambda x, y: x.ewm(com=3).cov(y, pairwise=True), + lambda x, y: x.ewm(com=3).corr(y, pairwise=True), + ], + ) + def test_pairwise_with_other( + self, pairwise_frames, pairwise_target_frame, pairwise_other_frame, f + ): + # DataFrame with another DataFrame, pairwise=True + result = f(pairwise_frames, pairwise_other_frame) + tm.assert_index_equal( + result.index.levels[0], pairwise_frames.index, check_names=False + ) + tm.assert_index_equal( + safe_sort(result.index.levels[1]), + safe_sort(pairwise_other_frame.columns.unique()), + ) + expected = f(pairwise_target_frame, pairwise_other_frame) + # since we have sorted the results + # we can only compare non-nans + result = result.dropna().values + expected = expected.dropna().values + + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + @pytest.mark.filterwarnings("ignore:RuntimeWarning") + @pytest.mark.parametrize( + "f", + [ + lambda x, y: x.expanding().cov(y, pairwise=False), + lambda x, y: x.expanding().corr(y, pairwise=False), + lambda x, y: x.rolling(window=3).cov(y, pairwise=False), + lambda x, y: x.rolling(window=3).corr(y, pairwise=False), + lambda x, y: x.ewm(com=3).cov(y, pairwise=False), + lambda x, y: x.ewm(com=3).corr(y, pairwise=False), + ], + ) + def test_no_pairwise_with_other(self, pairwise_frames, pairwise_other_frame, f): + # DataFrame with another DataFrame, pairwise=False + result = ( + f(pairwise_frames, pairwise_other_frame) + if pairwise_frames.columns.is_unique + else None + ) + if result is not None: + # we can have int and str columns + expected_index = pairwise_frames.index.union(pairwise_other_frame.index) + expected_columns = pairwise_frames.columns.union( + pairwise_other_frame.columns + ) + 
tm.assert_index_equal(result.index, expected_index) + tm.assert_index_equal(result.columns, expected_columns) + else: + with pytest.raises(ValueError, match="'arg1' columns are not unique"): + f(pairwise_frames, pairwise_other_frame) + with pytest.raises(ValueError, match="'arg2' columns are not unique"): + f(pairwise_other_frame, pairwise_frames) + + @pytest.mark.parametrize( + "f", + [ + lambda x, y: x.expanding().cov(y), + lambda x, y: x.expanding().corr(y), + lambda x, y: x.rolling(window=3).cov(y), + lambda x, y: x.rolling(window=3).corr(y), + lambda x, y: x.ewm(com=3).cov(y), + lambda x, y: x.ewm(com=3).corr(y), + ], + ) + def test_pairwise_with_series(self, pairwise_frames, pairwise_target_frame, f): + # DataFrame with a Series + result = f(pairwise_frames, Series([1, 1, 3, 8])) + tm.assert_index_equal(result.index, pairwise_frames.index) + tm.assert_index_equal(result.columns, pairwise_frames.columns) + expected = f(pairwise_target_frame, Series([1, 1, 3, 8])) + # since we have sorted the results + # we can only compare non-nans + result = result.dropna().values + expected = expected.dropna().values + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + result = f(Series([1, 1, 3, 8]), pairwise_frames) + tm.assert_index_equal(result.index, pairwise_frames.index) + tm.assert_index_equal(result.columns, pairwise_frames.columns) + expected = f(Series([1, 1, 3, 8]), pairwise_target_frame) + # since we have sorted the results + # we can only compare non-nans + result = result.dropna().values + expected = expected.dropna().values + tm.assert_numpy_array_equal(result, expected, check_dtype=False) + + def test_corr_freq_memory_error(self): + # GH 31789 + s = Series(range(5), index=date_range("2020", periods=5)) + result = s.rolling("12h").corr(s) + expected = Series([np.nan] * 5, index=date_range("2020", periods=5)) + tm.assert_series_equal(result, expected) + + def test_cov_mulittindex(self): + # GH 34440 + + columns = 
MultiIndex.from_product([list("ab"), list("xy"), list("AB")]) + index = range(3) + df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns) + + result = df.ewm(alpha=0.1).cov() + + index = MultiIndex.from_product([range(3), list("ab"), list("xy"), list("AB")]) + columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")]) + expected = DataFrame( + np.vstack( + ( + np.full((8, 8), np.nan), + np.full((8, 8), 32.000000), + np.full((8, 8), 63.881919), + ) + ), + index=index, + columns=columns, + ) + + tm.assert_frame_equal(result, expected) + + def test_multindex_columns_pairwise_func(self): + # GH 21157 + columns = MultiIndex.from_arrays([["M", "N"], ["P", "Q"]], names=["a", "b"]) + df = DataFrame(np.ones((5, 2)), columns=columns) + result = df.rolling(3).corr() + expected = DataFrame( + np.nan, + index=MultiIndex.from_arrays( + [ + np.repeat(np.arange(5, dtype=np.int64), 2), + ["M", "N"] * 5, + ["P", "Q"] * 5, + ], + names=[None, "a", "b"], + ), + columns=columns, + ) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/window/test_rolling.py b/venv/lib/python3.10/site-packages/pandas/tests/window/test_rolling.py new file mode 100644 index 0000000000000000000000000000000000000000..f353a7fa2f0fe5f0480c31b40a2907943ff5a5c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_rolling.py @@ -0,0 +1,1979 @@ +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas.compat import ( + IS64, + is_platform_arm, + is_platform_power, +) + +from pandas import ( + DataFrame, + DatetimeIndex, + MultiIndex, + Series, + Timedelta, + Timestamp, + date_range, + period_range, + to_datetime, + to_timedelta, +) +import pandas._testing as tm +from pandas.api.indexers import BaseIndexer +from pandas.core.indexers.objects import VariableOffsetWindowIndexer + +from pandas.tseries.offsets import BusinessDay + + +def test_doc_string(): + df = 
DataFrame({"B": [0, 1, 2, np.nan, 4]}) + df + df.rolling(2).sum() + df.rolling(2, min_periods=1).sum() + + +def test_constructor(frame_or_series): + # GH 12669 + + c = frame_or_series(range(5)).rolling + + # valid + c(0) + c(window=2) + c(window=2, min_periods=1) + c(window=2, min_periods=1, center=True) + c(window=2, min_periods=1, center=False) + + # GH 13383 + + msg = "window must be an integer 0 or greater" + + with pytest.raises(ValueError, match=msg): + c(-1) + + +@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])]) +def test_invalid_constructor(frame_or_series, w): + # not valid + + c = frame_or_series(range(5)).rolling + + msg = "|".join( + [ + "window must be an integer", + "passed window foo is not compatible with a datetimelike index", + ] + ) + with pytest.raises(ValueError, match=msg): + c(window=w) + + msg = "min_periods must be an integer" + with pytest.raises(ValueError, match=msg): + c(window=2, min_periods=w) + + msg = "center must be a boolean" + with pytest.raises(ValueError, match=msg): + c(window=2, min_periods=1, center=w) + + +@pytest.mark.parametrize( + "window", + [ + timedelta(days=3), + Timedelta(days=3), + "3D", + VariableOffsetWindowIndexer( + index=date_range("2015-12-25", periods=5), offset=BusinessDay(1) + ), + ], +) +def test_freq_window_not_implemented(window): + # GH 15354 + df = DataFrame( + np.arange(10), + index=date_range("2015-12-24", periods=10, freq="D"), + ) + with pytest.raises( + NotImplementedError, match="^step (not implemented|is not supported)" + ): + df.rolling(window, step=3).sum() + + +@pytest.mark.parametrize("agg", ["cov", "corr"]) +def test_step_not_implemented_for_cov_corr(agg): + # GH 15354 + roll = DataFrame(range(2)).rolling(1, step=2) + with pytest.raises(NotImplementedError, match="step not implemented"): + getattr(roll, agg)() + + +@pytest.mark.parametrize("window", [timedelta(days=3), Timedelta(days=3)]) +def test_constructor_with_timedelta_window(window): + # GH 15440 + n = 10 + df = DataFrame( 
+ {"value": np.arange(n)}, + index=date_range("2015-12-24", periods=n, freq="D"), + ) + expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3)) + + result = df.rolling(window=window).sum() + expected = DataFrame( + {"value": expected_data}, + index=date_range("2015-12-24", periods=n, freq="D"), + ) + tm.assert_frame_equal(result, expected) + expected = df.rolling("3D").sum() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("window", [timedelta(days=3), Timedelta(days=3), "3D"]) +def test_constructor_timedelta_window_and_minperiods(window, raw): + # GH 15305 + n = 10 + df = DataFrame( + {"value": np.arange(n)}, + index=date_range("2017-08-08", periods=n, freq="D"), + ) + expected = DataFrame( + {"value": np.append([np.nan, 1.0], np.arange(3.0, 27.0, 3))}, + index=date_range("2017-08-08", periods=n, freq="D"), + ) + result_roll_sum = df.rolling(window=window, min_periods=2).sum() + result_roll_generic = df.rolling(window=window, min_periods=2).apply(sum, raw=raw) + tm.assert_frame_equal(result_roll_sum, expected) + tm.assert_frame_equal(result_roll_generic, expected) + + +def test_closed_fixed(closed, arithmetic_win_operators): + # GH 34315 + func_name = arithmetic_win_operators + df_fixed = DataFrame({"A": [0, 1, 2, 3, 4]}) + df_time = DataFrame({"A": [0, 1, 2, 3, 4]}, index=date_range("2020", periods=5)) + + result = getattr( + df_fixed.rolling(2, closed=closed, min_periods=1), + func_name, + )() + expected = getattr( + df_time.rolling("2D", closed=closed, min_periods=1), + func_name, + )().reset_index(drop=True) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "closed, window_selections", + [ + ( + "both", + [ + [True, True, False, False, False], + [True, True, True, False, False], + [False, True, True, True, False], + [False, False, True, True, True], + [False, False, False, True, True], + ], + ), + ( + "left", + [ + [True, False, False, False, False], + [True, True, False, False, False], + [False, True, 
True, False, False], + [False, False, True, True, False], + [False, False, False, True, True], + ], + ), + ( + "right", + [ + [True, True, False, False, False], + [False, True, True, False, False], + [False, False, True, True, False], + [False, False, False, True, True], + [False, False, False, False, True], + ], + ), + ( + "neither", + [ + [True, False, False, False, False], + [False, True, False, False, False], + [False, False, True, False, False], + [False, False, False, True, False], + [False, False, False, False, True], + ], + ), + ], +) +def test_datetimelike_centered_selections( + closed, window_selections, arithmetic_win_operators +): + # GH 34315 + func_name = arithmetic_win_operators + df_time = DataFrame( + {"A": [0.0, 1.0, 2.0, 3.0, 4.0]}, index=date_range("2020", periods=5) + ) + + expected = DataFrame( + {"A": [getattr(df_time["A"].iloc[s], func_name)() for s in window_selections]}, + index=date_range("2020", periods=5), + ) + + if func_name == "sem": + kwargs = {"ddof": 0} + else: + kwargs = {} + + result = getattr( + df_time.rolling("2D", closed=closed, min_periods=1, center=True), + func_name, + )(**kwargs) + + tm.assert_frame_equal(result, expected, check_dtype=False) + + +@pytest.mark.parametrize( + "window,closed,expected", + [ + ("3s", "right", [3.0, 3.0, 3.0]), + ("3s", "both", [3.0, 3.0, 3.0]), + ("3s", "left", [3.0, 3.0, 3.0]), + ("3s", "neither", [3.0, 3.0, 3.0]), + ("2s", "right", [3.0, 2.0, 2.0]), + ("2s", "both", [3.0, 3.0, 3.0]), + ("2s", "left", [1.0, 3.0, 3.0]), + ("2s", "neither", [1.0, 2.0, 2.0]), + ], +) +def test_datetimelike_centered_offset_covers_all( + window, closed, expected, frame_or_series +): + # GH 42753 + + index = [ + Timestamp("20130101 09:00:01"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:02"), + ] + df = frame_or_series([1, 1, 1], index=index) + + result = df.rolling(window, closed=closed, center=True).sum() + expected = frame_or_series(expected, index=index) + tm.assert_equal(result, expected) + 
+ +@pytest.mark.parametrize( + "window,closed,expected", + [ + ("2D", "right", [4, 4, 4, 4, 4, 4, 2, 2]), + ("2D", "left", [2, 2, 4, 4, 4, 4, 4, 4]), + ("2D", "both", [4, 4, 6, 6, 6, 6, 4, 4]), + ("2D", "neither", [2, 2, 2, 2, 2, 2, 2, 2]), + ], +) +def test_datetimelike_nonunique_index_centering( + window, closed, expected, frame_or_series +): + index = DatetimeIndex( + [ + "2020-01-01", + "2020-01-01", + "2020-01-02", + "2020-01-02", + "2020-01-03", + "2020-01-03", + "2020-01-04", + "2020-01-04", + ] + ) + + df = frame_or_series([1] * 8, index=index, dtype=float) + expected = frame_or_series(expected, index=index, dtype=float) + + result = df.rolling(window, center=True, closed=closed).sum() + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "closed,expected", + [ + ("left", [np.nan, np.nan, 1, 1, 1, 10, 14, 14, 18, 21]), + ("neither", [np.nan, np.nan, 1, 1, 1, 9, 5, 5, 13, 8]), + ("right", [0, 1, 3, 6, 10, 14, 11, 18, 21, 17]), + ("both", [0, 1, 3, 6, 10, 15, 20, 27, 26, 30]), + ], +) +def test_variable_window_nonunique(closed, expected, frame_or_series): + # GH 20712 + index = DatetimeIndex( + [ + "2011-01-01", + "2011-01-01", + "2011-01-02", + "2011-01-02", + "2011-01-02", + "2011-01-03", + "2011-01-04", + "2011-01-04", + "2011-01-05", + "2011-01-06", + ] + ) + + df = frame_or_series(range(10), index=index, dtype=float) + expected = frame_or_series(expected, index=index, dtype=float) + + result = df.rolling("2D", closed=closed).sum() + + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "closed,expected", + [ + ("left", [np.nan, np.nan, 1, 1, 1, 10, 15, 15, 18, 21]), + ("neither", [np.nan, np.nan, 1, 1, 1, 10, 15, 15, 13, 8]), + ("right", [0, 1, 3, 6, 10, 15, 21, 28, 21, 17]), + ("both", [0, 1, 3, 6, 10, 15, 21, 28, 26, 30]), + ], +) +def test_variable_offset_window_nonunique(closed, expected, frame_or_series): + # GH 20712 + index = DatetimeIndex( + [ + "2011-01-01", + "2011-01-01", + "2011-01-02", + "2011-01-02", + 
"2011-01-02", + "2011-01-03", + "2011-01-04", + "2011-01-04", + "2011-01-05", + "2011-01-06", + ] + ) + + df = frame_or_series(range(10), index=index, dtype=float) + expected = frame_or_series(expected, index=index, dtype=float) + + offset = BusinessDay(2) + indexer = VariableOffsetWindowIndexer(index=index, offset=offset) + result = df.rolling(indexer, closed=closed, min_periods=1).sum() + + tm.assert_equal(result, expected) + + +def test_even_number_window_alignment(): + # see discussion in GH 38780 + s = Series(range(3), index=date_range(start="2020-01-01", freq="D", periods=3)) + + # behavior of index- and datetime-based windows differs here! + # s.rolling(window=2, min_periods=1, center=True).mean() + + result = s.rolling(window="2D", min_periods=1, center=True).mean() + + expected = Series([0.5, 1.5, 2], index=s.index) + + tm.assert_series_equal(result, expected) + + +def test_closed_fixed_binary_col(center, step): + # GH 34315 + data = [0, 1, 1, 0, 0, 1, 0, 1] + df = DataFrame( + {"binary_col": data}, + index=date_range(start="2020-01-01", freq="min", periods=len(data)), + ) + + if center: + expected_data = [2 / 3, 0.5, 0.4, 0.5, 0.428571, 0.5, 0.571429, 0.5] + else: + expected_data = [np.nan, 0, 0.5, 2 / 3, 0.5, 0.4, 0.5, 0.428571] + + expected = DataFrame( + expected_data, + columns=["binary_col"], + index=date_range(start="2020-01-01", freq="min", periods=len(expected_data)), + )[::step] + + rolling = df.rolling( + window=len(df), closed="left", min_periods=1, center=center, step=step + ) + result = rolling.mean() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("closed", ["neither", "left"]) +def test_closed_empty(closed, arithmetic_win_operators): + # GH 26005 + func_name = arithmetic_win_operators + ser = Series(data=np.arange(5), index=date_range("2000", periods=5, freq="2D")) + roll = ser.rolling("1D", closed=closed) + + result = getattr(roll, func_name)() + expected = Series([np.nan] * 5, index=ser.index) + 
tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("func", ["min", "max"]) +def test_closed_one_entry(func): + # GH24718 + ser = Series(data=[2], index=date_range("2000", periods=1)) + result = getattr(ser.rolling("10D", closed="left"), func)() + tm.assert_series_equal(result, Series([np.nan], index=ser.index)) + + +@pytest.mark.parametrize("func", ["min", "max"]) +def test_closed_one_entry_groupby(func): + # GH24718 + ser = DataFrame( + data={"A": [1, 1, 2], "B": [3, 2, 1]}, + index=date_range("2000", periods=3), + ) + result = getattr( + ser.groupby("A", sort=False)["B"].rolling("10D", closed="left"), func + )() + exp_idx = MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index], names=("A", None)) + expected = Series(data=[np.nan, 3, np.nan], index=exp_idx, name="B") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("input_dtype", ["int", "float"]) +@pytest.mark.parametrize( + "func,closed,expected", + [ + ("min", "right", [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]), + ("min", "both", [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]), + ("min", "neither", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]), + ("min", "left", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]), + ("max", "right", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("max", "both", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + ("max", "neither", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]), + ("max", "left", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]), + ], +) +def test_closed_min_max_datetime(input_dtype, func, closed, expected): + # see gh-21704 + ser = Series( + data=np.arange(10).astype(input_dtype), + index=date_range("2000", periods=10), + ) + + result = getattr(ser.rolling("3D", closed=closed), func)() + expected = Series(expected, index=ser.index) + tm.assert_series_equal(result, expected) + + +def test_closed_uneven(): + # see gh-21704 + ser = Series(data=np.arange(10), index=date_range("2000", periods=10)) + + # uneven + ser = ser.drop(index=ser.index[[1, 5]]) + result = ser.rolling("3D", closed="left").min() + expected = 
Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "func,closed,expected", + [ + ("min", "right", [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), + ("min", "both", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]), + ("min", "neither", [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), + ("min", "left", [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]), + ("max", "right", [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]), + ("max", "both", [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]), + ("max", "neither", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]), + ("max", "left", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]), + ], +) +def test_closed_min_max_minp(func, closed, expected): + # see gh-21704 + ser = Series(data=np.arange(10), index=date_range("2000", periods=10)) + # Explicit cast to float to avoid implicit cast when setting nan + ser = ser.astype("float") + ser[ser.index[-3:]] = np.nan + result = getattr(ser.rolling("3D", min_periods=2, closed=closed), func)() + expected = Series(expected, index=ser.index) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "closed,expected", + [ + ("right", [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]), + ("both", [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]), + ("neither", [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]), + ("left", [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 7]), + ], +) +def test_closed_median_quantile(closed, expected): + # GH 26005 + ser = Series(data=np.arange(10), index=date_range("2000", periods=10)) + roll = ser.rolling("3D", closed=closed) + expected = Series(expected, index=ser.index) + + result = roll.median() + tm.assert_series_equal(result, expected) + + result = roll.quantile(0.5) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("roller", ["1s", 1]) +def tests_empty_df_rolling(roller): + # GH 15819 Verifies that datetime and integer rolling windows can be + # applied to empty DataFrames + 
expected = DataFrame() + result = DataFrame().rolling(roller).sum() + tm.assert_frame_equal(result, expected) + + # Verifies that datetime and integer rolling windows can be applied to + # empty DataFrames with datetime index + expected = DataFrame(index=DatetimeIndex([])) + result = DataFrame(index=DatetimeIndex([])).rolling(roller).sum() + tm.assert_frame_equal(result, expected) + + +def test_empty_window_median_quantile(): + # GH 26005 + expected = Series([np.nan, np.nan, np.nan]) + roll = Series(np.arange(3)).rolling(0) + + result = roll.median() + tm.assert_series_equal(result, expected) + + result = roll.quantile(0.1) + tm.assert_series_equal(result, expected) + + +def test_missing_minp_zero(): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = Series([np.nan]) + result = x.rolling(1, min_periods=0).sum() + expected = Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.rolling(1, min_periods=1).sum() + expected = Series([np.nan]) + tm.assert_series_equal(result, expected) + + +def test_missing_minp_zero_variable(): + # https://github.com/pandas-dev/pandas/pull/18921 + x = Series( + [np.nan] * 4, + index=DatetimeIndex(["2017-01-01", "2017-01-04", "2017-01-06", "2017-01-07"]), + ) + result = x.rolling(Timedelta("2d"), min_periods=0).sum() + expected = Series(0.0, index=x.index) + tm.assert_series_equal(result, expected) + + +def test_multi_index_names(): + # GH 16789, 16825 + cols = MultiIndex.from_product([["A", "B"], ["C", "D", "E"]], names=["1", "2"]) + df = DataFrame(np.ones((10, 6)), columns=cols) + result = df.rolling(3).cov() + + tm.assert_index_equal(result.columns, df.columns) + assert result.index.names == [None, "1", "2"] + + +def test_rolling_axis_sum(axis_frame): + # see gh-23372. 
+ df = DataFrame(np.ones((10, 20))) + axis = df._get_axis_number(axis_frame) + + if axis == 0: + msg = "The 'axis' keyword in DataFrame.rolling" + expected = DataFrame({i: [np.nan] * 2 + [3.0] * 8 for i in range(20)}) + else: + # axis == 1 + msg = "Support for axis=1 in DataFrame.rolling is deprecated" + expected = DataFrame([[np.nan] * 2 + [3.0] * 18] * 10) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.rolling(3, axis=axis_frame).sum() + tm.assert_frame_equal(result, expected) + + +def test_rolling_axis_count(axis_frame): + # see gh-26055 + df = DataFrame({"x": range(3), "y": range(3)}) + + axis = df._get_axis_number(axis_frame) + + if axis in [0, "index"]: + msg = "The 'axis' keyword in DataFrame.rolling" + expected = DataFrame({"x": [1.0, 2.0, 2.0], "y": [1.0, 2.0, 2.0]}) + else: + msg = "Support for axis=1 in DataFrame.rolling is deprecated" + expected = DataFrame({"x": [1.0, 1.0, 1.0], "y": [2.0, 2.0, 2.0]}) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.rolling(2, axis=axis_frame, min_periods=0).count() + tm.assert_frame_equal(result, expected) + + +def test_readonly_array(): + # GH-27766 + arr = np.array([1, 3, np.nan, 3, 5]) + arr.setflags(write=False) + result = Series(arr).rolling(2).mean() + expected = Series([np.nan, 2, np.nan, np.nan, 4]) + tm.assert_series_equal(result, expected) + + +def test_rolling_datetime(axis_frame, tz_naive_fixture): + # GH-28192 + tz = tz_naive_fixture + df = DataFrame( + {i: [1] * 2 for i in date_range("2019-8-01", "2019-08-03", freq="D", tz=tz)} + ) + + if axis_frame in [0, "index"]: + msg = "The 'axis' keyword in DataFrame.rolling" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.T.rolling("2D", axis=axis_frame).sum().T + else: + msg = "Support for axis=1 in DataFrame.rolling" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.rolling("2D", axis=axis_frame).sum() + expected = DataFrame( + { + **{ + i: [1.0] * 2 + 
for i in date_range("2019-8-01", periods=1, freq="D", tz=tz) + }, + **{ + i: [2.0] * 2 + for i in date_range("2019-8-02", "2019-8-03", freq="D", tz=tz) + }, + } + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("center", [True, False]) +def test_rolling_window_as_string(center): + # see gh-22590 + date_today = datetime.now() + days = date_range(date_today, date_today + timedelta(365), freq="D") + + data = np.ones(len(days)) + df = DataFrame({"DateCol": days, "metric": data}) + + df.set_index("DateCol", inplace=True) + result = df.rolling(window="21D", min_periods=2, closed="left", center=center)[ + "metric" + ].agg("max") + + index = days.rename("DateCol") + index = index._with_freq(None) + expected_data = np.ones(len(days), dtype=np.float64) + if not center: + expected_data[:2] = np.nan + expected = Series(expected_data, index=index, name="metric") + tm.assert_series_equal(result, expected) + + +def test_min_periods1(): + # GH#6795 + df = DataFrame([0, 1, 2, 1, 0], columns=["a"]) + result = df["a"].rolling(3, center=True, min_periods=1).max() + expected = Series([1.0, 2.0, 2.0, 2.0, 1.0], name="a") + tm.assert_series_equal(result, expected) + + +def test_rolling_count_with_min_periods(frame_or_series): + # GH 26996 + result = frame_or_series(range(5)).rolling(3, min_periods=3).count() + expected = frame_or_series([np.nan, np.nan, 3.0, 3.0, 3.0]) + tm.assert_equal(result, expected) + + +def test_rolling_count_default_min_periods_with_null_values(frame_or_series): + # GH 26996 + values = [1, 2, 3, np.nan, 4, 5, 6] + expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0] + + # GH 31302 + result = frame_or_series(values).rolling(3, min_periods=0).count() + expected = frame_or_series(expected_counts) + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "df,expected,window,min_periods", + [ + ( + DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), + [ + ({"A": [1], "B": [4]}, [0]), + ({"A": [1, 2], "B": [4, 5]}, [0, 1]), + ({"A": [1, 2, 
3], "B": [4, 5, 6]}, [0, 1, 2]), + ], + 3, + None, + ), + ( + DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), + [ + ({"A": [1], "B": [4]}, [0]), + ({"A": [1, 2], "B": [4, 5]}, [0, 1]), + ({"A": [2, 3], "B": [5, 6]}, [1, 2]), + ], + 2, + 1, + ), + ( + DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), + [ + ({"A": [1], "B": [4]}, [0]), + ({"A": [1, 2], "B": [4, 5]}, [0, 1]), + ({"A": [2, 3], "B": [5, 6]}, [1, 2]), + ], + 2, + 2, + ), + ( + DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), + [ + ({"A": [1], "B": [4]}, [0]), + ({"A": [2], "B": [5]}, [1]), + ({"A": [3], "B": [6]}, [2]), + ], + 1, + 1, + ), + ( + DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), + [ + ({"A": [1], "B": [4]}, [0]), + ({"A": [2], "B": [5]}, [1]), + ({"A": [3], "B": [6]}, [2]), + ], + 1, + 0, + ), + (DataFrame({"A": [1], "B": [4]}), [], 2, None), + (DataFrame({"A": [1], "B": [4]}), [], 2, 1), + (DataFrame(), [({}, [])], 2, None), + ( + DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}), + [ + ({"A": [1.0], "B": [np.nan]}, [0]), + ({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]), + ({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]), + ], + 3, + 2, + ), + ], +) +def test_iter_rolling_dataframe(df, expected, window, min_periods): + # GH 11704 + expected = [DataFrame(values, index=index) for (values, index) in expected] + + for expected, actual in zip(expected, df.rolling(window, min_periods=min_periods)): + tm.assert_frame_equal(actual, expected) + + +@pytest.mark.parametrize( + "expected,window", + [ + ( + [ + ({"A": [1], "B": [4]}, [0]), + ({"A": [1, 2], "B": [4, 5]}, [0, 1]), + ({"A": [2, 3], "B": [5, 6]}, [1, 2]), + ], + "2D", + ), + ( + [ + ({"A": [1], "B": [4]}, [0]), + ({"A": [1, 2], "B": [4, 5]}, [0, 1]), + ({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]), + ], + "3D", + ), + ( + [ + ({"A": [1], "B": [4]}, [0]), + ({"A": [2], "B": [5]}, [1]), + ({"A": [3], "B": [6]}, [2]), + ], + "1D", + ), + ], +) +def test_iter_rolling_on_dataframe(expected, window): + # GH 11704, 40373 + df = DataFrame( + { + "A": 
[1, 2, 3, 4, 5], + "B": [4, 5, 6, 7, 8], + "C": date_range(start="2016-01-01", periods=5, freq="D"), + } + ) + + expected = [ + DataFrame(values, index=df.loc[index, "C"]) for (values, index) in expected + ] + for expected, actual in zip(expected, df.rolling(window, on="C")): + tm.assert_frame_equal(actual, expected) + + +def test_iter_rolling_on_dataframe_unordered(): + # GH 43386 + df = DataFrame({"a": ["x", "y", "x"], "b": [0, 1, 2]}) + results = list(df.groupby("a").rolling(2)) + expecteds = [df.iloc[idx, [1]] for idx in [[0], [0, 2], [1]]] + for result, expected in zip(results, expecteds): + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "ser,expected,window, min_periods", + [ + ( + Series([1, 2, 3]), + [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], + 3, + None, + ), + ( + Series([1, 2, 3]), + [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], + 3, + 1, + ), + ( + Series([1, 2, 3]), + [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])], + 2, + 1, + ), + ( + Series([1, 2, 3]), + [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])], + 2, + 2, + ), + (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 0), + (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 1), + (Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2, 0), + (Series([], dtype="int64"), [], 2, 1), + ], +) +def test_iter_rolling_series(ser, expected, window, min_periods): + # GH 11704 + expected = [Series(values, index=index) for (values, index) in expected] + + for expected, actual in zip(expected, ser.rolling(window, min_periods=min_periods)): + tm.assert_series_equal(actual, expected) + + +@pytest.mark.parametrize( + "expected,expected_index,window", + [ + ( + [[0], [1], [2], [3], [4]], + [ + date_range("2020-01-01", periods=1, freq="D"), + date_range("2020-01-02", periods=1, freq="D"), + date_range("2020-01-03", periods=1, freq="D"), + date_range("2020-01-04", periods=1, freq="D"), + date_range("2020-01-05", periods=1, freq="D"), + ], + "1D", + ), 
+ ( + [[0], [0, 1], [1, 2], [2, 3], [3, 4]], + [ + date_range("2020-01-01", periods=1, freq="D"), + date_range("2020-01-01", periods=2, freq="D"), + date_range("2020-01-02", periods=2, freq="D"), + date_range("2020-01-03", periods=2, freq="D"), + date_range("2020-01-04", periods=2, freq="D"), + ], + "2D", + ), + ( + [[0], [0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]], + [ + date_range("2020-01-01", periods=1, freq="D"), + date_range("2020-01-01", periods=2, freq="D"), + date_range("2020-01-01", periods=3, freq="D"), + date_range("2020-01-02", periods=3, freq="D"), + date_range("2020-01-03", periods=3, freq="D"), + ], + "3D", + ), + ], +) +def test_iter_rolling_datetime(expected, expected_index, window): + # GH 11704 + ser = Series(range(5), index=date_range(start="2020-01-01", periods=5, freq="D")) + + expected = [ + Series(values, index=idx) for (values, idx) in zip(expected, expected_index) + ] + + for expected, actual in zip(expected, ser.rolling(window)): + tm.assert_series_equal(actual, expected) + + +@pytest.mark.parametrize( + "grouping,_index", + [ + ( + {"level": 0}, + MultiIndex.from_tuples( + [(0, 0), (0, 0), (1, 1), (1, 1), (1, 1)], names=[None, None] + ), + ), + ( + {"by": "X"}, + MultiIndex.from_tuples( + [(0, 0), (1, 0), (2, 1), (3, 1), (4, 1)], names=["X", None] + ), + ), + ], +) +def test_rolling_positional_argument(grouping, _index, raw): + # GH 34605 + + def scaled_sum(*args): + if len(args) < 2: + raise ValueError("The function needs two arguments") + array, scale = args + return array.sum() / scale + + df = DataFrame(data={"X": range(5)}, index=[0, 0, 1, 1, 1]) + + expected = DataFrame(data={"X": [0.0, 0.5, 1.0, 1.5, 2.0]}, index=_index) + # GH 40341 + if "by" in grouping: + expected = expected.drop(columns="X", errors="ignore") + result = df.groupby(**grouping).rolling(1).apply(scaled_sum, raw=raw, args=(2,)) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("add", [0.0, 2.0]) +def 
test_rolling_numerical_accuracy_kahan_mean(add, unit): + # GH: 36031 implementing kahan summation + dti = DatetimeIndex( + [ + Timestamp("19700101 09:00:00"), + Timestamp("19700101 09:00:03"), + Timestamp("19700101 09:00:06"), + ] + ).as_unit(unit) + df = DataFrame( + {"A": [3002399751580331.0 + add, -0.0, -0.0]}, + index=dti, + ) + result = ( + df.resample("1s").ffill().rolling("3s", closed="left", min_periods=3).mean() + ) + dates = date_range("19700101 09:00:00", periods=7, freq="s", unit=unit) + expected = DataFrame( + { + "A": [ + np.nan, + np.nan, + np.nan, + 3002399751580330.5, + 2001599834386887.25, + 1000799917193443.625, + 0.0, + ] + }, + index=dates, + ) + tm.assert_frame_equal(result, expected) + + +def test_rolling_numerical_accuracy_kahan_sum(): + # GH: 13254 + df = DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=["x"]) + result = df["x"].rolling(3).sum() + expected = Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name="x") + tm.assert_series_equal(result, expected) + + +def test_rolling_numerical_accuracy_jump(): + # GH: 32761 + index = date_range(start="2020-01-01", end="2020-01-02", freq="60s").append( + DatetimeIndex(["2020-01-03"]) + ) + data = np.random.default_rng(2).random(len(index)) + + df = DataFrame({"data": data}, index=index) + result = df.rolling("60s").mean() + tm.assert_frame_equal(result, df[["data"]]) + + +def test_rolling_numerical_accuracy_small_values(): + # GH: 10319 + s = Series( + data=[0.00012456, 0.0003, -0.0, -0.0], + index=date_range("1999-02-03", "1999-02-06"), + ) + result = s.rolling(1).mean() + tm.assert_series_equal(result, s) + + +def test_rolling_numerical_too_large_numbers(): + # GH: 11645 + dates = date_range("2015-01-01", periods=10, freq="D") + ds = Series(data=range(10), index=dates, dtype=np.float64) + ds.iloc[2] = -9e33 + result = ds.rolling(5).mean() + expected = Series( + [ + np.nan, + np.nan, + np.nan, + np.nan, + -1.8e33, + -1.8e33, + -1.8e33, + 5.0, + 6.0, + 7.0, + ], + index=dates, + ) + 
tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + ("func", "value"), + [("sum", 2.0), ("max", 1.0), ("min", 1.0), ("mean", 1.0), ("median", 1.0)], +) +def test_rolling_mixed_dtypes_axis_1(func, value): + # GH: 20649 + df = DataFrame(1, index=[1, 2], columns=["a", "b", "c"]) + df["c"] = 1.0 + msg = "Support for axis=1 in DataFrame.rolling is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + roll = df.rolling(window=2, min_periods=1, axis=1) + result = getattr(roll, func)() + expected = DataFrame( + {"a": [1.0, 1.0], "b": [value, value], "c": [value, value]}, + index=[1, 2], + ) + tm.assert_frame_equal(result, expected) + + +def test_rolling_axis_one_with_nan(): + # GH: 35596 + df = DataFrame( + [ + [0, 1, 2, 4, np.nan, np.nan, np.nan], + [0, 1, 2, np.nan, np.nan, np.nan, np.nan], + [0, 2, 2, np.nan, 2, np.nan, 1], + ] + ) + msg = "Support for axis=1 in DataFrame.rolling is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.rolling(window=7, min_periods=1, axis="columns").sum() + expected = DataFrame( + [ + [0.0, 1.0, 3.0, 7.0, 7.0, 7.0, 7.0], + [0.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0], + [0.0, 2.0, 4.0, 4.0, 6.0, 6.0, 7.0], + ] + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "value", + ["test", to_datetime("2019-12-31"), to_timedelta("1 days 06:05:01.00003")], +) +def test_rolling_axis_1_non_numeric_dtypes(value): + # GH: 20649 + df = DataFrame({"a": [1, 2]}) + df["b"] = value + msg = "Support for axis=1 in DataFrame.rolling is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.rolling(window=2, min_periods=1, axis=1).sum() + expected = DataFrame({"a": [1.0, 2.0]}) + tm.assert_frame_equal(result, expected) + + +def test_rolling_on_df_transposed(): + # GH: 32724 + df = DataFrame({"A": [1, None], "B": [4, 5], "C": [7, 8]}) + expected = DataFrame({"A": [1.0, np.nan], "B": [5.0, 5.0], "C": [11.0, 13.0]}) + msg = "Support for 
axis=1 in DataFrame.rolling is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.rolling(min_periods=1, window=2, axis=1).sum() + tm.assert_frame_equal(result, expected) + + result = df.T.rolling(min_periods=1, window=2).sum().T + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + ("index", "window"), + [ + ( + period_range(start="2020-01-01 08:00", end="2020-01-01 08:08", freq="min"), + "2min", + ), + ( + period_range( + start="2020-01-01 08:00", end="2020-01-01 12:00", freq="30min" + ), + "1h", + ), + ], +) +@pytest.mark.parametrize( + ("func", "values"), + [ + ("min", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6]), + ("max", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7]), + ("sum", [np.nan, 0, 1, 3, 5, 7, 9, 11, 13]), + ], +) +def test_rolling_period_index(index, window, func, values): + # GH: 34225 + ds = Series([0, 1, 2, 3, 4, 5, 6, 7, 8], index=index) + result = getattr(ds.rolling(window, closed="left"), func)() + expected = Series(values, index=index) + tm.assert_series_equal(result, expected) + + +def test_rolling_sem(frame_or_series): + # GH: 26476 + obj = frame_or_series([0, 1, 2]) + result = obj.rolling(2, min_periods=1).sem() + if isinstance(result, DataFrame): + result = Series(result[0].values) + expected = Series([np.nan] + [0.7071067811865476] * 2) + tm.assert_series_equal(result, expected) + + +@pytest.mark.xfail( + is_platform_arm() or is_platform_power(), + reason="GH 38921", +) +@pytest.mark.parametrize( + ("func", "third_value", "values"), + [ + ("var", 1, [5e33, 0, 0.5, 0.5, 2, 0]), + ("std", 1, [7.071068e16, 0, 0.7071068, 0.7071068, 1.414214, 0]), + ("var", 2, [5e33, 0.5, 0, 0.5, 2, 0]), + ("std", 2, [7.071068e16, 0.7071068, 0, 0.7071068, 1.414214, 0]), + ], +) +def test_rolling_var_numerical_issues(func, third_value, values): + # GH: 37051 + ds = Series([99999999999999999, 1, third_value, 2, 3, 1, 1]) + result = getattr(ds.rolling(2), func)() + expected = Series([np.nan] + values) + 
tm.assert_series_equal(result, expected) + # GH 42064 + # new `roll_var` will output 0.0 correctly + tm.assert_series_equal(result == 0, expected == 0) + + +def test_timeoffset_as_window_parameter_for_corr(unit): + # GH: 28266 + dti = DatetimeIndex( + [ + Timestamp("20130101 09:00:00"), + Timestamp("20130102 09:00:02"), + Timestamp("20130103 09:00:03"), + Timestamp("20130105 09:00:05"), + Timestamp("20130106 09:00:06"), + ] + ).as_unit(unit) + mi = MultiIndex.from_product([dti, ["B", "A"]]) + + exp = DataFrame( + { + "B": [ + np.nan, + np.nan, + 0.9999999999999998, + -1.0, + 1.0, + -0.3273268353539892, + 0.9999999999999998, + 1.0, + 0.9999999999999998, + 1.0, + ], + "A": [ + np.nan, + np.nan, + -1.0, + 1.0000000000000002, + -0.3273268353539892, + 0.9999999999999966, + 1.0, + 1.0000000000000002, + 1.0, + 1.0000000000000002, + ], + }, + index=mi, + ) + + df = DataFrame( + {"B": [0, 1, 2, 4, 3], "A": [7, 4, 6, 9, 3]}, + index=dti, + ) + + res = df.rolling(window="3d").corr() + + tm.assert_frame_equal(exp, res) + + +@pytest.mark.parametrize("method", ["var", "sum", "mean", "skew", "kurt", "min", "max"]) +def test_rolling_decreasing_indices(method): + """ + Make sure that decreasing indices give the same results as increasing indices. 
+ + GH 36933 + """ + df = DataFrame({"values": np.arange(-15, 10) ** 2}) + df_reverse = DataFrame({"values": df["values"][::-1]}, index=df.index[::-1]) + + increasing = getattr(df.rolling(window=5), method)() + decreasing = getattr(df_reverse.rolling(window=5), method)() + + assert np.abs(decreasing.values[::-1][:-4] - increasing.values[4:]).max() < 1e-12 + + +@pytest.mark.parametrize( + "window,closed,expected", + [ + ("2s", "right", [1.0, 3.0, 5.0, 3.0]), + ("2s", "left", [0.0, 1.0, 3.0, 5.0]), + ("2s", "both", [1.0, 3.0, 6.0, 5.0]), + ("2s", "neither", [0.0, 1.0, 2.0, 3.0]), + ("3s", "right", [1.0, 3.0, 6.0, 5.0]), + ("3s", "left", [1.0, 3.0, 6.0, 5.0]), + ("3s", "both", [1.0, 3.0, 6.0, 5.0]), + ("3s", "neither", [1.0, 3.0, 6.0, 5.0]), + ], +) +def test_rolling_decreasing_indices_centered(window, closed, expected, frame_or_series): + """ + Ensure that a symmetrical inverted index return same result as non-inverted. + """ + # GH 43927 + + index = date_range("2020", periods=4, freq="1s") + df_inc = frame_or_series(range(4), index=index) + df_dec = frame_or_series(range(4), index=index[::-1]) + + expected_inc = frame_or_series(expected, index=index) + expected_dec = frame_or_series(expected, index=index[::-1]) + + result_inc = df_inc.rolling(window, closed=closed, center=True).sum() + result_dec = df_dec.rolling(window, closed=closed, center=True).sum() + + tm.assert_equal(result_inc, expected_inc) + tm.assert_equal(result_dec, expected_dec) + + +@pytest.mark.parametrize( + "window,expected", + [ + ("1ns", [1.0, 1.0, 1.0, 1.0]), + ("3ns", [2.0, 3.0, 3.0, 2.0]), + ], +) +def test_rolling_center_nanosecond_resolution( + window, closed, expected, frame_or_series +): + index = date_range("2020", periods=4, freq="1ns") + df = frame_or_series([1, 1, 1, 1], index=index, dtype=float) + expected = frame_or_series(expected, index=index, dtype=float) + result = df.rolling(window, closed=closed, center=True).sum() + tm.assert_equal(result, expected) + + 
+@pytest.mark.parametrize( + "method,expected", + [ + ( + "var", + [ + float("nan"), + 43.0, + float("nan"), + 136.333333, + 43.5, + 94.966667, + 182.0, + 318.0, + ], + ), + ( + "mean", + [float("nan"), 7.5, float("nan"), 21.5, 6.0, 9.166667, 13.0, 17.5], + ), + ( + "sum", + [float("nan"), 30.0, float("nan"), 86.0, 30.0, 55.0, 91.0, 140.0], + ), + ( + "skew", + [ + float("nan"), + 0.709296, + float("nan"), + 0.407073, + 0.984656, + 0.919184, + 0.874674, + 0.842418, + ], + ), + ( + "kurt", + [ + float("nan"), + -0.5916711736073559, + float("nan"), + -1.0028993131317954, + -0.06103844629409494, + -0.254143227116194, + -0.37362637362637585, + -0.45439658241367054, + ], + ), + ], +) +def test_rolling_non_monotonic(method, expected): + """ + Make sure the (rare) branch of non-monotonic indices is covered by a test. + + output from 1.1.3 is assumed to be the expected output. Output of sum/mean has + manually been verified. + + GH 36933. + """ + # Based on an example found in computation.rst + use_expanding = [True, False, True, False, True, True, True, True] + df = DataFrame({"values": np.arange(len(use_expanding)) ** 2}) + + class CustomIndexer(BaseIndexer): + def get_window_bounds(self, num_values, min_periods, center, closed, step): + start = np.empty(num_values, dtype=np.int64) + end = np.empty(num_values, dtype=np.int64) + for i in range(num_values): + if self.use_expanding[i]: + start[i] = 0 + end[i] = i + 1 + else: + start[i] = i + end[i] = i + self.window_size + return start, end + + indexer = CustomIndexer(window_size=4, use_expanding=use_expanding) + + result = getattr(df.rolling(indexer), method)() + expected = DataFrame({"values": expected}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + ("index", "window"), + [ + ([0, 1, 2, 3, 4], 2), + (date_range("2001-01-01", freq="D", periods=5), "2D"), + ], +) +def test_rolling_corr_timedelta_index(index, window): + # GH: 31286 + x = Series([1, 2, 3, 4, 5], index=index) + y = x.copy() + 
x.iloc[0:2] = 0.0 + result = x.rolling(window).corr(y) + expected = Series([np.nan, np.nan, 1, 1, 1], index=index) + tm.assert_almost_equal(result, expected) + + +def test_groupby_rolling_nan_included(): + # GH 35542 + data = {"group": ["g1", np.nan, "g1", "g2", np.nan], "B": [0, 1, 2, 3, 4]} + df = DataFrame(data) + result = df.groupby("group", dropna=False).rolling(1, min_periods=1).mean() + expected = DataFrame( + {"B": [0.0, 2.0, 3.0, 1.0, 4.0]}, + # GH-38057 from_tuples puts the NaNs in the codes, result expects them + # to be in the levels, at the moment + # index=MultiIndex.from_tuples( + # [("g1", 0), ("g1", 2), ("g2", 3), (np.nan, 1), (np.nan, 4)], + # names=["group", None], + # ), + index=MultiIndex( + [["g1", "g2", np.nan], [0, 1, 2, 3, 4]], + [[0, 0, 1, 2, 2], [0, 2, 3, 1, 4]], + names=["group", None], + ), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("method", ["skew", "kurt"]) +def test_rolling_skew_kurt_numerical_stability(method): + # GH#6929 + ser = Series(np.random.default_rng(2).random(10)) + ser_copy = ser.copy() + expected = getattr(ser.rolling(3), method)() + tm.assert_series_equal(ser, ser_copy) + ser = ser + 50000 + result = getattr(ser.rolling(3), method)() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + ("method", "values"), + [ + ("skew", [2.0, 0.854563, 0.0, 1.999984]), + ("kurt", [4.0, -1.289256, -1.2, 3.999946]), + ], +) +def test_rolling_skew_kurt_large_value_range(method, values): + # GH: 37557 + s = Series([3000000, 1, 1, 2, 3, 4, 999]) + result = getattr(s.rolling(4), method)() + expected = Series([np.nan] * 3 + values) + tm.assert_series_equal(result, expected) + + +def test_invalid_method(): + with pytest.raises(ValueError, match="method must be 'table' or 'single"): + Series(range(1)).rolling(1, method="foo") + + +@pytest.mark.parametrize("window", [1, "1d"]) +def test_rolling_descending_date_order_with_offset(window, frame_or_series): + # GH#40002 + idx = 
date_range(start="2020-01-01", end="2020-01-03", freq="1d") + obj = frame_or_series(range(1, 4), index=idx) + result = obj.rolling("1d", closed="left").sum() + expected = frame_or_series([np.nan, 1, 2], index=idx) + tm.assert_equal(result, expected) + + result = obj.iloc[::-1].rolling("1d", closed="left").sum() + idx = date_range(start="2020-01-03", end="2020-01-01", freq="-1d") + expected = frame_or_series([np.nan, 3, 2], index=idx) + tm.assert_equal(result, expected) + + +def test_rolling_var_floating_artifact_precision(): + # GH 37051 + s = Series([7, 5, 5, 5]) + result = s.rolling(3).var() + expected = Series([np.nan, np.nan, 4 / 3, 0]) + tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15) + # GH 42064 + # new `roll_var` will output 0.0 correctly + tm.assert_series_equal(result == 0, expected == 0) + + +def test_rolling_std_small_values(): + # GH 37051 + s = Series( + [ + 0.00000054, + 0.00000053, + 0.00000054, + ] + ) + result = s.rolling(2).std() + expected = Series([np.nan, 7.071068e-9, 7.071068e-9]) + tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15) + + +@pytest.mark.parametrize( + "start, exp_values", + [ + (1, [0.03, 0.0155, 0.0155, 0.011, 0.01025]), + (2, [0.001, 0.001, 0.0015, 0.00366666]), + ], +) +def test_rolling_mean_all_nan_window_floating_artifacts(start, exp_values): + # GH#41053 + df = DataFrame( + [ + 0.03, + 0.03, + 0.001, + np.nan, + 0.002, + 0.008, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + 0.005, + 0.2, + ] + ) + + values = exp_values + [ + 0.00366666, + 0.005, + 0.005, + 0.008, + np.nan, + np.nan, + 0.005, + 0.102500, + ] + expected = DataFrame( + values, + index=list(range(start, len(values) + start)), + ) + result = df.iloc[start:].rolling(5, min_periods=0).mean() + tm.assert_frame_equal(result, expected) + + +def test_rolling_sum_all_nan_window_floating_artifacts(): + # GH#41053 + df = DataFrame([0.002, 0.008, 0.005, np.nan, np.nan, np.nan]) + result = df.rolling(3, 
min_periods=0).sum() + expected = DataFrame([0.002, 0.010, 0.015, 0.013, 0.005, 0.0]) + tm.assert_frame_equal(result, expected) + + +def test_rolling_zero_window(): + # GH 22719 + s = Series(range(1)) + result = s.rolling(0).min() + expected = Series([np.nan]) + tm.assert_series_equal(result, expected) + + +def test_rolling_float_dtype(float_numpy_dtype): + # GH#42452 + df = DataFrame({"A": range(5), "B": range(10, 15)}, dtype=float_numpy_dtype) + expected = DataFrame( + {"A": [np.nan] * 5, "B": range(10, 20, 2)}, + dtype=float_numpy_dtype, + ) + msg = "Support for axis=1 in DataFrame.rolling is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.rolling(2, axis=1).sum() + tm.assert_frame_equal(result, expected, check_dtype=False) + + +def test_rolling_numeric_dtypes(): + # GH#41779 + df = DataFrame(np.arange(40).reshape(4, 10), columns=list("abcdefghij")).astype( + { + "a": "float16", + "b": "float32", + "c": "float64", + "d": "int8", + "e": "int16", + "f": "int32", + "g": "uint8", + "h": "uint16", + "i": "uint32", + "j": "uint64", + } + ) + msg = "Support for axis=1 in DataFrame.rolling is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.rolling(window=2, min_periods=1, axis=1).min() + expected = DataFrame( + { + "a": range(0, 40, 10), + "b": range(0, 40, 10), + "c": range(1, 40, 10), + "d": range(2, 40, 10), + "e": range(3, 40, 10), + "f": range(4, 40, 10), + "g": range(5, 40, 10), + "h": range(6, 40, 10), + "i": range(7, 40, 10), + "j": range(8, 40, 10), + }, + dtype="float64", + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("window", [1, 3, 10, 20]) +@pytest.mark.parametrize("method", ["min", "max", "average"]) +@pytest.mark.parametrize("pct", [True, False]) +@pytest.mark.parametrize("ascending", [True, False]) +@pytest.mark.parametrize("test_data", ["default", "duplicates", "nans"]) +def test_rank(window, method, pct, ascending, test_data): + length = 20 + if 
test_data == "default": + ser = Series(data=np.random.default_rng(2).random(length)) + elif test_data == "duplicates": + ser = Series(data=np.random.default_rng(2).choice(3, length)) + elif test_data == "nans": + ser = Series( + data=np.random.default_rng(2).choice( + [1.0, 0.25, 0.75, np.nan, np.inf, -np.inf], length + ) + ) + + expected = ser.rolling(window).apply( + lambda x: x.rank(method=method, pct=pct, ascending=ascending).iloc[-1] + ) + result = ser.rolling(window).rank(method=method, pct=pct, ascending=ascending) + + tm.assert_series_equal(result, expected) + + +def test_rolling_quantile_np_percentile(): + # #9413: Tests that rolling window's quantile default behavior + # is analogous to Numpy's percentile + row = 10 + col = 5 + idx = date_range("20100101", periods=row, freq="B") + df = DataFrame( + np.random.default_rng(2).random(row * col).reshape((row, -1)), index=idx + ) + + df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0) + np_percentile = np.percentile(df, [25, 50, 75], axis=0) + + tm.assert_almost_equal(df_quantile.values, np.array(np_percentile)) + + +@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1]) +@pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"] +) +@pytest.mark.parametrize( + "data", + [ + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], + [8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0], + [0.0, np.nan, 0.2, np.nan, 0.4], + [np.nan, np.nan, np.nan, np.nan], + [np.nan, 0.1, np.nan, 0.3, 0.4, 0.5], + [0.5], + [np.nan, 0.7, 0.6], + ], +) +def test_rolling_quantile_interpolation_options(quantile, interpolation, data): + # Tests that rolling window's quantile behavior is analogous to + # Series' quantile for each interpolation option + s = Series(data) + + q1 = s.quantile(quantile, interpolation) + q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1] + + if np.isnan(q1): + assert np.isnan(q2) + else: + if not IS64: + # Less precision on 32-bit + assert np.allclose([q1], [q2], 
rtol=1e-07, atol=0) + else: + assert q1 == q2 + + +def test_invalid_quantile_value(): + data = np.arange(5) + s = Series(data) + + msg = "Interpolation 'invalid' is not supported" + with pytest.raises(ValueError, match=msg): + s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid") + + +def test_rolling_quantile_param(): + ser = Series([0.0, 0.1, 0.5, 0.9, 1.0]) + msg = "quantile value -0.1 not in \\[0, 1\\]" + with pytest.raises(ValueError, match=msg): + ser.rolling(3).quantile(-0.1) + + msg = "quantile value 10.0 not in \\[0, 1\\]" + with pytest.raises(ValueError, match=msg): + ser.rolling(3).quantile(10.0) + + msg = "must be real number, not str" + with pytest.raises(TypeError, match=msg): + ser.rolling(3).quantile("foo") + + +def test_rolling_std_1obs(): + vals = Series([1.0, 2.0, 3.0, 4.0, 5.0]) + + result = vals.rolling(1, min_periods=1).std() + expected = Series([np.nan] * 5) + tm.assert_series_equal(result, expected) + + result = vals.rolling(1, min_periods=1).std(ddof=0) + expected = Series([0.0] * 5) + tm.assert_series_equal(result, expected) + + result = Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std() + assert np.isnan(result[2]) + + +def test_rolling_std_neg_sqrt(): + # unit test from Bottleneck + + # Test move_nanstd for neg sqrt. 
+ + a = Series( + [ + 0.0011448196318903589, + 0.00028718669878572767, + 0.00028718669878572767, + 0.00028718669878572767, + 0.00028718669878572767, + ] + ) + b = a.rolling(window=3).std() + assert np.isfinite(b[2:]).all() + + b = a.ewm(span=3).std() + assert np.isfinite(b[2:]).all() + + +def test_step_not_integer_raises(): + with pytest.raises(ValueError, match="step must be an integer"): + DataFrame(range(2)).rolling(1, step="foo") + + +def test_step_not_positive_raises(): + with pytest.raises(ValueError, match="step must be >= 0"): + DataFrame(range(2)).rolling(1, step=-1) + + +@pytest.mark.parametrize( + ["values", "window", "min_periods", "expected"], + [ + [ + [20, 10, 10, np.inf, 1, 1, 2, 3], + 3, + 1, + [np.nan, 50, 100 / 3, 0, 40.5, 0, 1 / 3, 1], + ], + [ + [20, 10, 10, np.nan, 10, 1, 2, 3], + 3, + 1, + [np.nan, 50, 100 / 3, 0, 0, 40.5, 73 / 3, 1], + ], + [ + [np.nan, 5, 6, 7, 5, 5, 5], + 3, + 3, + [np.nan] * 3 + [1, 1, 4 / 3, 0], + ], + [ + [5, 7, 7, 7, np.nan, np.inf, 4, 3, 3, 3], + 3, + 3, + [np.nan] * 2 + [4 / 3, 0] + [np.nan] * 4 + [1 / 3, 0], + ], + [ + [5, 7, 7, 7, np.nan, np.inf, 7, 3, 3, 3], + 3, + 3, + [np.nan] * 2 + [4 / 3, 0] + [np.nan] * 4 + [16 / 3, 0], + ], + [ + [5, 7] * 4, + 3, + 3, + [np.nan] * 2 + [4 / 3] * 6, + ], + [ + [5, 7, 5, np.nan, 7, 5, 7], + 3, + 2, + [np.nan, 2, 4 / 3] + [2] * 3 + [4 / 3], + ], + ], +) +def test_rolling_var_same_value_count_logic(values, window, min_periods, expected): + # GH 42064. + + expected = Series(expected) + sr = Series(values) + + # With new algo implemented, result will be set to .0 in rolling var + # if sufficient amount of consecutively same values are found. + result_var = sr.rolling(window, min_periods=min_periods).var() + + # use `assert_series_equal` twice to check for equality, + # because `check_exact=True` will fail in 32-bit tests due to + # precision loss. + + # 1. 
result should be close to correct value + # non-zero values can still differ slightly from "truth" + # as the result of online algorithm + tm.assert_series_equal(result_var, expected) + # 2. zeros should be exactly the same since the new algo takes effect here + tm.assert_series_equal(expected == 0, result_var == 0) + + # std should also pass as it's just a sqrt of var + result_std = sr.rolling(window, min_periods=min_periods).std() + tm.assert_series_equal(result_std, np.sqrt(expected)) + tm.assert_series_equal(expected == 0, result_std == 0) + + +def test_rolling_mean_sum_floating_artifacts(): + # GH 42064. + + sr = Series([1 / 3, 4, 0, 0, 0, 0, 0]) + r = sr.rolling(3) + result = r.mean() + assert (result[-3:] == 0).all() + result = r.sum() + assert (result[-3:] == 0).all() + + +def test_rolling_skew_kurt_floating_artifacts(): + # GH 42064 46431 + + sr = Series([1 / 3, 4, 0, 0, 0, 0, 0]) + r = sr.rolling(4) + result = r.skew() + assert (result[-2:] == 0).all() + result = r.kurt() + assert (result[-2:] == -3).all() + + +def test_numeric_only_frame(arithmetic_win_operators, numeric_only): + # GH#46560 + kernel = arithmetic_win_operators + df = DataFrame({"a": [1], "b": 2, "c": 3}) + df["c"] = df["c"].astype(object) + rolling = df.rolling(2, min_periods=1) + op = getattr(rolling, kernel) + result = op(numeric_only=numeric_only) + + columns = ["a", "b"] if numeric_only else ["a", "b", "c"] + expected = df[columns].agg([kernel]).reset_index(drop=True).astype(float) + assert list(expected.columns) == columns + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("kernel", ["corr", "cov"]) +@pytest.mark.parametrize("use_arg", [True, False]) +def test_numeric_only_corr_cov_frame(kernel, numeric_only, use_arg): + # GH#46560 + df = DataFrame({"a": [1, 2, 3], "b": 2, "c": 3}) + df["c"] = df["c"].astype(object) + arg = (df,) if use_arg else () + rolling = df.rolling(2, min_periods=1) + op = getattr(rolling, kernel) + result = op(*arg, 
numeric_only=numeric_only) + + # Compare result to op using float dtypes, dropping c when numeric_only is True + columns = ["a", "b"] if numeric_only else ["a", "b", "c"] + df2 = df[columns].astype(float) + arg2 = (df2,) if use_arg else () + rolling2 = df2.rolling(2, min_periods=1) + op2 = getattr(rolling2, kernel) + expected = op2(*arg2, numeric_only=numeric_only) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("dtype", [int, object]) +def test_numeric_only_series(arithmetic_win_operators, numeric_only, dtype): + # GH#46560 + kernel = arithmetic_win_operators + ser = Series([1], dtype=dtype) + rolling = ser.rolling(2, min_periods=1) + op = getattr(rolling, kernel) + if numeric_only and dtype is object: + msg = f"Rolling.{kernel} does not implement numeric_only" + with pytest.raises(NotImplementedError, match=msg): + op(numeric_only=numeric_only) + else: + result = op(numeric_only=numeric_only) + expected = ser.agg([kernel]).reset_index(drop=True).astype(float) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("kernel", ["corr", "cov"]) +@pytest.mark.parametrize("use_arg", [True, False]) +@pytest.mark.parametrize("dtype", [int, object]) +def test_numeric_only_corr_cov_series(kernel, use_arg, numeric_only, dtype): + # GH#46560 + ser = Series([1, 2, 3], dtype=dtype) + arg = (ser,) if use_arg else () + rolling = ser.rolling(2, min_periods=1) + op = getattr(rolling, kernel) + if numeric_only and dtype is object: + msg = f"Rolling.{kernel} does not implement numeric_only" + with pytest.raises(NotImplementedError, match=msg): + op(*arg, numeric_only=numeric_only) + else: + result = op(*arg, numeric_only=numeric_only) + + ser2 = ser.astype(float) + arg2 = (ser2,) if use_arg else () + rolling2 = ser2.rolling(2, min_periods=1) + op2 = getattr(rolling2, kernel) + expected = op2(*arg2, numeric_only=numeric_only) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) 
+@pytest.mark.parametrize("tz", [None, "UTC", "Europe/Prague"]) +def test_rolling_timedelta_window_non_nanoseconds(unit, tz): + # Test Sum, GH#55106 + df_time = DataFrame( + {"A": range(5)}, index=date_range("2013-01-01", freq="1s", periods=5, tz=tz) + ) + sum_in_nanosecs = df_time.rolling("1s").sum() + # microseconds / milliseconds should not break the correct rolling + df_time.index = df_time.index.as_unit(unit) + sum_in_microsecs = df_time.rolling("1s").sum() + sum_in_microsecs.index = sum_in_microsecs.index.as_unit("ns") + tm.assert_frame_equal(sum_in_nanosecs, sum_in_microsecs) + + # Test max, GH#55026 + ref_dates = date_range("2023-01-01", "2023-01-10", unit="ns", tz=tz) + ref_series = Series(0, index=ref_dates) + ref_series.iloc[0] = 1 + ref_max_series = ref_series.rolling(Timedelta(days=4)).max() + + dates = date_range("2023-01-01", "2023-01-10", unit=unit, tz=tz) + series = Series(0, index=dates) + series.iloc[0] = 1 + max_series = series.rolling(Timedelta(days=4)).max() + + ref_df = DataFrame(ref_max_series) + df = DataFrame(max_series) + df.index = df.index.as_unit("ns") + + tm.assert_frame_equal(ref_df, df) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/window/test_rolling_quantile.py b/venv/lib/python3.10/site-packages/pandas/tests/window/test_rolling_quantile.py new file mode 100644 index 0000000000000000000000000000000000000000..d5a7010923563c99b335fd9a309a1d8762d21651 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_rolling_quantile.py @@ -0,0 +1,182 @@ +from functools import partial + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, + concat, + isna, + notna, +) +import pandas._testing as tm + +from pandas.tseries import offsets + + +def scoreatpercentile(a, per): + values = np.sort(a, axis=0) + + idx = int(per / 1.0 * (values.shape[0] - 1)) + + if idx == values.shape[0] - 1: + retval = values[-1] + + else: + qlow = idx / (values.shape[0] - 1) + qhig = (idx + 1) / 
(values.shape[0] - 1) + vlow = values[idx] + vhig = values[idx + 1] + retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow) + + return retval + + +@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) +def test_series(series, q, step): + compare_func = partial(scoreatpercentile, per=q) + result = series.rolling(50, step=step).quantile(q) + assert isinstance(result, Series) + end = range(0, len(series), step or 1)[-1] + 1 + tm.assert_almost_equal(result.iloc[-1], compare_func(series[end - 50 : end])) + + +@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) +def test_frame(raw, frame, q, step): + compare_func = partial(scoreatpercentile, per=q) + result = frame.rolling(50, step=step).quantile(q) + assert isinstance(result, DataFrame) + end = range(0, len(frame), step or 1)[-1] + 1 + tm.assert_series_equal( + result.iloc[-1, :], + frame.iloc[end - 50 : end, :].apply(compare_func, axis=0, raw=raw), + check_names=False, + ) + + +@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) +def test_time_rule_series(series, q): + compare_func = partial(scoreatpercentile, per=q) + win = 25 + ser = series[::2].resample("B").mean() + series_result = ser.rolling(window=win, min_periods=10).quantile(q) + last_date = series_result.index[-1] + prev_date = last_date - 24 * offsets.BDay() + + trunc_series = series[::2].truncate(prev_date, last_date) + tm.assert_almost_equal(series_result.iloc[-1], compare_func(trunc_series)) + + +@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) +def test_time_rule_frame(raw, frame, q): + compare_func = partial(scoreatpercentile, per=q) + win = 25 + frm = frame[::2].resample("B").mean() + frame_result = frm.rolling(window=win, min_periods=10).quantile(q) + last_date = frame_result.index[-1] + prev_date = last_date - 24 * offsets.BDay() + + trunc_frame = frame[::2].truncate(prev_date, last_date) + tm.assert_series_equal( + frame_result.xs(last_date), + trunc_frame.apply(compare_func, raw=raw), + check_names=False, + ) + + 
+@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) +def test_nans(q): + compare_func = partial(scoreatpercentile, per=q) + obj = Series(np.random.default_rng(2).standard_normal(50)) + obj[:10] = np.nan + obj[-10:] = np.nan + + result = obj.rolling(50, min_periods=30).quantile(q) + tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10])) + + # min_periods is working correctly + result = obj.rolling(20, min_periods=15).quantile(q) + assert isna(result.iloc[23]) + assert not isna(result.iloc[24]) + + assert not isna(result.iloc[-6]) + assert isna(result.iloc[-5]) + + obj2 = Series(np.random.default_rng(2).standard_normal(20)) + result = obj2.rolling(10, min_periods=5).quantile(q) + assert isna(result.iloc[3]) + assert notna(result.iloc[4]) + + result0 = obj.rolling(20, min_periods=0).quantile(q) + result1 = obj.rolling(20, min_periods=1).quantile(q) + tm.assert_almost_equal(result0, result1) + + +@pytest.mark.parametrize("minp", [0, 99, 100]) +@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) +def test_min_periods(series, minp, q, step): + result = series.rolling(len(series) + 1, min_periods=minp, step=step).quantile(q) + expected = series.rolling(len(series), min_periods=minp, step=step).quantile(q) + nan_mask = isna(result) + tm.assert_series_equal(nan_mask, isna(expected)) + + nan_mask = ~nan_mask + tm.assert_almost_equal(result[nan_mask], expected[nan_mask]) + + +@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) +def test_center(q): + obj = Series(np.random.default_rng(2).standard_normal(50)) + obj[:10] = np.nan + obj[-10:] = np.nan + + result = obj.rolling(20, center=True).quantile(q) + expected = ( + concat([obj, Series([np.nan] * 9)]) + .rolling(20) + .quantile(q) + .iloc[9:] + .reset_index(drop=True) + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) +def test_center_reindex_series(series, q): + # shifter index + s = [f"x{x:d}" for x in range(12)] + + series_xp = ( + 
series.reindex(list(series.index) + s) + .rolling(window=25) + .quantile(q) + .shift(-12) + .reindex(series.index) + ) + + series_rs = series.rolling(window=25, center=True).quantile(q) + tm.assert_series_equal(series_xp, series_rs) + + +@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0]) +def test_center_reindex_frame(frame, q): + # shifter index + s = [f"x{x:d}" for x in range(12)] + + frame_xp = ( + frame.reindex(list(frame.index) + s) + .rolling(window=25) + .quantile(q) + .shift(-12) + .reindex(frame.index) + ) + frame_rs = frame.rolling(window=25, center=True).quantile(q) + tm.assert_frame_equal(frame_xp, frame_rs) + + +def test_keyword_quantile_deprecated(): + # GH #52550 + s = Series([1, 2, 3, 4]) + with tm.assert_produces_warning(FutureWarning): + s.rolling(2).quantile(quantile=0.4) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/window/test_rolling_skew_kurt.py b/venv/lib/python3.10/site-packages/pandas/tests/window/test_rolling_skew_kurt.py new file mode 100644 index 0000000000000000000000000000000000000000..79c14f243e7cc93b395ea84e05ec6bc79942b79b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_rolling_skew_kurt.py @@ -0,0 +1,227 @@ +from functools import partial + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, + concat, + isna, + notna, +) +import pandas._testing as tm + +from pandas.tseries import offsets + + +@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) +def test_series(series, sp_func, roll_func): + sp_stats = pytest.importorskip("scipy.stats") + + compare_func = partial(getattr(sp_stats, sp_func), bias=False) + result = getattr(series.rolling(50), roll_func)() + assert isinstance(result, Series) + tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:])) + + +@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) +def test_frame(raw, frame, sp_func, roll_func): + sp_stats = 
pytest.importorskip("scipy.stats") + + compare_func = partial(getattr(sp_stats, sp_func), bias=False) + result = getattr(frame.rolling(50), roll_func)() + assert isinstance(result, DataFrame) + tm.assert_series_equal( + result.iloc[-1, :], + frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw), + check_names=False, + ) + + +@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) +def test_time_rule_series(series, sp_func, roll_func): + sp_stats = pytest.importorskip("scipy.stats") + + compare_func = partial(getattr(sp_stats, sp_func), bias=False) + win = 25 + ser = series[::2].resample("B").mean() + series_result = getattr(ser.rolling(window=win, min_periods=10), roll_func)() + last_date = series_result.index[-1] + prev_date = last_date - 24 * offsets.BDay() + + trunc_series = series[::2].truncate(prev_date, last_date) + tm.assert_almost_equal(series_result.iloc[-1], compare_func(trunc_series)) + + +@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) +def test_time_rule_frame(raw, frame, sp_func, roll_func): + sp_stats = pytest.importorskip("scipy.stats") + + compare_func = partial(getattr(sp_stats, sp_func), bias=False) + win = 25 + frm = frame[::2].resample("B").mean() + frame_result = getattr(frm.rolling(window=win, min_periods=10), roll_func)() + last_date = frame_result.index[-1] + prev_date = last_date - 24 * offsets.BDay() + + trunc_frame = frame[::2].truncate(prev_date, last_date) + tm.assert_series_equal( + frame_result.xs(last_date), + trunc_frame.apply(compare_func, raw=raw), + check_names=False, + ) + + +@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]]) +def test_nans(sp_func, roll_func): + sp_stats = pytest.importorskip("scipy.stats") + + compare_func = partial(getattr(sp_stats, sp_func), bias=False) + obj = Series(np.random.default_rng(2).standard_normal(50)) + obj[:10] = np.nan + obj[-10:] = np.nan + + result = getattr(obj.rolling(50, 
min_periods=30), roll_func)() + tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10])) + + # min_periods is working correctly + result = getattr(obj.rolling(20, min_periods=15), roll_func)() + assert isna(result.iloc[23]) + assert not isna(result.iloc[24]) + + assert not isna(result.iloc[-6]) + assert isna(result.iloc[-5]) + + obj2 = Series(np.random.default_rng(2).standard_normal(20)) + result = getattr(obj2.rolling(10, min_periods=5), roll_func)() + assert isna(result.iloc[3]) + assert notna(result.iloc[4]) + + result0 = getattr(obj.rolling(20, min_periods=0), roll_func)() + result1 = getattr(obj.rolling(20, min_periods=1), roll_func)() + tm.assert_almost_equal(result0, result1) + + +@pytest.mark.parametrize("minp", [0, 99, 100]) +@pytest.mark.parametrize("roll_func", ["kurt", "skew"]) +def test_min_periods(series, minp, roll_func, step): + result = getattr( + series.rolling(len(series) + 1, min_periods=minp, step=step), roll_func + )() + expected = getattr( + series.rolling(len(series), min_periods=minp, step=step), roll_func + )() + nan_mask = isna(result) + tm.assert_series_equal(nan_mask, isna(expected)) + + nan_mask = ~nan_mask + tm.assert_almost_equal(result[nan_mask], expected[nan_mask]) + + +@pytest.mark.parametrize("roll_func", ["kurt", "skew"]) +def test_center(roll_func): + obj = Series(np.random.default_rng(2).standard_normal(50)) + obj[:10] = np.nan + obj[-10:] = np.nan + + result = getattr(obj.rolling(20, center=True), roll_func)() + expected = ( + getattr(concat([obj, Series([np.nan] * 9)]).rolling(20), roll_func)() + .iloc[9:] + .reset_index(drop=True) + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("roll_func", ["kurt", "skew"]) +def test_center_reindex_series(series, roll_func): + # shifter index + s = [f"x{x:d}" for x in range(12)] + + series_xp = ( + getattr( + series.reindex(list(series.index) + s).rolling(window=25), + roll_func, + )() + .shift(-12) + .reindex(series.index) + ) + series_rs = 
getattr(series.rolling(window=25, center=True), roll_func)() + tm.assert_series_equal(series_xp, series_rs) + + +@pytest.mark.slow +@pytest.mark.parametrize("roll_func", ["kurt", "skew"]) +def test_center_reindex_frame(frame, roll_func): + # shifter index + s = [f"x{x:d}" for x in range(12)] + + frame_xp = ( + getattr( + frame.reindex(list(frame.index) + s).rolling(window=25), + roll_func, + )() + .shift(-12) + .reindex(frame.index) + ) + frame_rs = getattr(frame.rolling(window=25, center=True), roll_func)() + tm.assert_frame_equal(frame_xp, frame_rs) + + +def test_rolling_skew_edge_cases(step): + expected = Series([np.nan] * 4 + [0.0])[::step] + # yields all NaN (0 variance) + d = Series([1] * 5) + x = d.rolling(window=5, step=step).skew() + # index 4 should be 0 as it contains 5 same obs + tm.assert_series_equal(expected, x) + + expected = Series([np.nan] * 5)[::step] + # yields all NaN (window too small) + d = Series(np.random.default_rng(2).standard_normal(5)) + x = d.rolling(window=2, step=step).skew() + tm.assert_series_equal(expected, x) + + # yields [NaN, NaN, NaN, 0.177994, 1.548824] + d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401]) + expected = Series([np.nan, np.nan, np.nan, 0.177994, 1.548824])[::step] + x = d.rolling(window=4, step=step).skew() + tm.assert_series_equal(expected, x) + + +def test_rolling_kurt_edge_cases(step): + expected = Series([np.nan] * 4 + [-3.0])[::step] + + # yields all NaN (0 variance) + d = Series([1] * 5) + x = d.rolling(window=5, step=step).kurt() + tm.assert_series_equal(expected, x) + + # yields all NaN (window too small) + expected = Series([np.nan] * 5)[::step] + d = Series(np.random.default_rng(2).standard_normal(5)) + x = d.rolling(window=3, step=step).kurt() + tm.assert_series_equal(expected, x) + + # yields [NaN, NaN, NaN, 1.224307, 2.671499] + d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401]) + expected = Series([np.nan, np.nan, np.nan, 1.224307, 2.671499])[::step] + 
x = d.rolling(window=4, step=step).kurt() + tm.assert_series_equal(expected, x) + + +def test_rolling_skew_eq_value_fperr(step): + # #18804 all rolling skew for all equal values should return Nan + # #46717 update: all equal values should return 0 instead of NaN + a = Series([1.1] * 15).rolling(window=10, step=step).skew() + assert (a[a.index >= 9] == 0).all() + assert a[a.index < 9].isna().all() + + +def test_rolling_kurt_eq_value_fperr(step): + # #18804 all rolling kurt for all equal values should return Nan + # #46717 update: all equal values should return -3 instead of NaN + a = Series([1.1] * 15).rolling(window=10, step=step).kurt() + assert (a[a.index >= 9] == -3).all() + assert a[a.index < 9].isna().all() diff --git a/venv/lib/python3.10/site-packages/pandas/tests/window/test_timeseries_window.py b/venv/lib/python3.10/site-packages/pandas/tests/window/test_timeseries_window.py new file mode 100644 index 0000000000000000000000000000000000000000..bd0fadeb3e47565b6bad3e2e96dd7962b0b31d07 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_timeseries_window.py @@ -0,0 +1,715 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + MultiIndex, + NaT, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + +from pandas.tseries import offsets + + +@pytest.fixture +def regular(): + return DataFrame( + {"A": date_range("20130101", periods=5, freq="s"), "B": range(5)} + ).set_index("A") + + +@pytest.fixture +def ragged(): + df = DataFrame({"B": range(5)}) + df.index = [ + Timestamp("20130101 09:00:00"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:05"), + Timestamp("20130101 09:00:06"), + ] + return df + + +class TestRollingTS: + # rolling time-series friendly + # xref GH13327 + + def test_doc_string(self): + df = DataFrame( + {"B": [0, 1, 2, np.nan, 4]}, + index=[ + Timestamp("20130101 
09:00:00"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:05"), + Timestamp("20130101 09:00:06"), + ], + ) + df + df.rolling("2s").sum() + + def test_invalid_window_non_int(self, regular): + # not a valid freq + msg = "passed window foobar is not compatible with a datetimelike index" + with pytest.raises(ValueError, match=msg): + regular.rolling(window="foobar") + # not a datetimelike index + msg = "window must be an integer" + with pytest.raises(ValueError, match=msg): + regular.reset_index().rolling(window="foobar") + + @pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)]) + def test_invalid_window_nonfixed(self, freq, regular): + # non-fixed freqs + msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency" + with pytest.raises(ValueError, match=msg): + regular.rolling(window=freq) + + @pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"]) + def test_valid_window(self, freq, regular): + regular.rolling(window=freq) + + @pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])]) + def test_invalid_minp(self, minp, regular): + # non-integer min_periods + msg = ( + r"local variable 'minp' referenced before assignment|" + "min_periods must be an integer" + ) + with pytest.raises(ValueError, match=msg): + regular.rolling(window="1D", min_periods=minp) + + def test_on(self, regular): + df = regular + + # not a valid column + msg = ( + r"invalid on specified as foobar, must be a column " + "\\(of DataFrame\\), an Index or None" + ) + with pytest.raises(ValueError, match=msg): + df.rolling(window="2s", on="foobar") + + # column is valid + df = df.copy() + df["C"] = date_range("20130101", periods=len(df)) + df.rolling(window="2d", on="C").sum() + + # invalid columns + msg = "window must be an integer" + with pytest.raises(ValueError, match=msg): + df.rolling(window="2d", on="B") + + # ok even though on non-selected + df.rolling(window="2d", on="C").B.sum() + + def test_monotonic_on(self): + # 
on/index must be monotonic + df = DataFrame( + {"A": date_range("20130101", periods=5, freq="s"), "B": range(5)} + ) + + assert df.A.is_monotonic_increasing + df.rolling("2s", on="A").sum() + + df = df.set_index("A") + assert df.index.is_monotonic_increasing + df.rolling("2s").sum() + + def test_non_monotonic_on(self): + # GH 19248 + df = DataFrame( + {"A": date_range("20130101", periods=5, freq="s"), "B": range(5)} + ) + df = df.set_index("A") + non_monotonic_index = df.index.to_list() + non_monotonic_index[0] = non_monotonic_index[3] + df.index = non_monotonic_index + + assert not df.index.is_monotonic_increasing + + msg = "index values must be monotonic" + with pytest.raises(ValueError, match=msg): + df.rolling("2s").sum() + + df = df.reset_index() + + msg = ( + r"invalid on specified as A, must be a column " + "\\(of DataFrame\\), an Index or None" + ) + with pytest.raises(ValueError, match=msg): + df.rolling("2s", on="A").sum() + + def test_frame_on(self): + df = DataFrame( + {"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")} + ) + + df["A"] = [ + Timestamp("20130101 09:00:00"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:05"), + Timestamp("20130101 09:00:06"), + ] + + # we are doing simulating using 'on' + expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True) + + result = df.rolling("2s", on="A").B.sum() + tm.assert_series_equal(result, expected) + + # test as a frame + # we should be ignoring the 'on' as an aggregation column + # note that the expected is setting, computing, and resetting + # so the columns need to be switched compared + # to the actual result where they are ordered as in the + # original + expected = ( + df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]] + ) + + result = df.rolling("2s", on="A")[["B"]].sum() + tm.assert_frame_equal(result, expected) + + def test_frame_on2(self, unit): + # using multiple aggregation columns + 
dti = DatetimeIndex( + [ + Timestamp("20130101 09:00:00"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:05"), + Timestamp("20130101 09:00:06"), + ] + ).as_unit(unit) + df = DataFrame( + { + "A": [0, 1, 2, 3, 4], + "B": [0, 1, 2, np.nan, 4], + "C": dti, + }, + columns=["A", "C", "B"], + ) + + expected1 = DataFrame( + {"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]}, + columns=["A", "C", "B"], + ) + + result = df.rolling("2s", on="C").sum() + expected = expected1 + tm.assert_frame_equal(result, expected) + + expected = Series([0, 1, 3, np.nan, 4], name="B") + result = df.rolling("2s", on="C").B.sum() + tm.assert_series_equal(result, expected) + + expected = expected1[["A", "B", "C"]] + result = df.rolling("2s", on="C")[["A", "B", "C"]].sum() + tm.assert_frame_equal(result, expected) + + def test_basic_regular(self, regular): + df = regular.copy() + + df.index = date_range("20130101", periods=5, freq="D") + expected = df.rolling(window=1, min_periods=1).sum() + result = df.rolling(window="1D").sum() + tm.assert_frame_equal(result, expected) + + df.index = date_range("20130101", periods=5, freq="2D") + expected = df.rolling(window=1, min_periods=1).sum() + result = df.rolling(window="2D", min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + expected = df.rolling(window=1, min_periods=1).sum() + result = df.rolling(window="2D", min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + expected = df.rolling(window=1).sum() + result = df.rolling(window="2D").sum() + tm.assert_frame_equal(result, expected) + + def test_min_periods(self, regular): + # compare for min_periods + df = regular + + # these slightly different + expected = df.rolling(2, min_periods=1).sum() + result = df.rolling("2s").sum() + tm.assert_frame_equal(result, expected) + + expected = df.rolling(2, min_periods=1).sum() + result = df.rolling("2s", min_periods=1).sum() + tm.assert_frame_equal(result, expected) + + 
def test_closed(self, regular, unit): + # xref GH13965 + + dti = DatetimeIndex( + [ + Timestamp("20130101 09:00:01"), + Timestamp("20130101 09:00:02"), + Timestamp("20130101 09:00:03"), + Timestamp("20130101 09:00:04"), + Timestamp("20130101 09:00:06"), + ] + ).as_unit(unit) + + df = DataFrame( + {"A": [1] * 5}, + index=dti, + ) + + # closed must be 'right', 'left', 'both', 'neither' + msg = "closed must be 'right', 'left', 'both' or 'neither'" + with pytest.raises(ValueError, match=msg): + regular.rolling(window="2s", closed="blabla") + + expected = df.copy() + expected["A"] = [1.0, 2, 2, 2, 1] + result = df.rolling("2s", closed="right").sum() + tm.assert_frame_equal(result, expected) + + # default should be 'right' + result = df.rolling("2s").sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [1.0, 2, 3, 3, 2] + result = df.rolling("2s", closed="both").sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [np.nan, 1.0, 2, 2, 1] + result = df.rolling("2s", closed="left").sum() + tm.assert_frame_equal(result, expected) + + expected = df.copy() + expected["A"] = [np.nan, 1.0, 1, 1, np.nan] + result = df.rolling("2s", closed="neither").sum() + tm.assert_frame_equal(result, expected) + + def test_ragged_sum(self, ragged): + df = ragged + result = df.rolling(window="1s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 3, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=2).sum() + expected = df.copy() + expected["B"] = [np.nan, np.nan, 3, np.nan, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="3s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 5, 7] + tm.assert_frame_equal(result, expected) + + result = 
df.rolling(window="3s").sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 5, 7] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="4s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 6, 9] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="4s", min_periods=3).sum() + expected = df.copy() + expected["B"] = [np.nan, np.nan, 3, 6, 9] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).sum() + expected = df.copy() + expected["B"] = [0.0, 1, 3, 6, 10] + tm.assert_frame_equal(result, expected) + + def test_ragged_mean(self, ragged): + df = ragged + result = df.rolling(window="1s", min_periods=1).mean() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).mean() + expected = df.copy() + expected["B"] = [0.0, 1, 1.5, 3.0, 3.5] + tm.assert_frame_equal(result, expected) + + def test_ragged_median(self, ragged): + df = ragged + result = df.rolling(window="1s", min_periods=1).median() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).median() + expected = df.copy() + expected["B"] = [0.0, 1, 1.5, 3.0, 3.5] + tm.assert_frame_equal(result, expected) + + def test_ragged_quantile(self, ragged): + df = ragged + result = df.rolling(window="1s", min_periods=1).quantile(0.5) + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).quantile(0.5) + expected = df.copy() + expected["B"] = [0.0, 1, 1.5, 3.0, 3.5] + tm.assert_frame_equal(result, expected) + + def test_ragged_std(self, ragged): + df = ragged + result = df.rolling(window="1s", min_periods=1).std(ddof=0) + expected = df.copy() + expected["B"] = [0.0] * 5 + tm.assert_frame_equal(result, expected) + + result = 
df.rolling(window="1s", min_periods=1).std(ddof=1) + expected = df.copy() + expected["B"] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="3s", min_periods=1).std(ddof=0) + expected = df.copy() + expected["B"] = [0.0] + [0.5] * 4 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).std(ddof=1) + expected = df.copy() + expected["B"] = [np.nan, 0.707107, 1.0, 1.0, 1.290994] + tm.assert_frame_equal(result, expected) + + def test_ragged_var(self, ragged): + df = ragged + result = df.rolling(window="1s", min_periods=1).var(ddof=0) + expected = df.copy() + expected["B"] = [0.0] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="1s", min_periods=1).var(ddof=1) + expected = df.copy() + expected["B"] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="3s", min_periods=1).var(ddof=0) + expected = df.copy() + expected["B"] = [0.0] + [0.25] * 4 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).var(ddof=1) + expected = df.copy() + expected["B"] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.0] + tm.assert_frame_equal(result, expected) + + def test_ragged_skew(self, ragged): + df = ragged + result = df.rolling(window="3s", min_periods=1).skew() + expected = df.copy() + expected["B"] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).skew() + expected = df.copy() + expected["B"] = [np.nan] * 2 + [0.0, 0.0, 0.0] + tm.assert_frame_equal(result, expected) + + def test_ragged_kurt(self, ragged): + df = ragged + result = df.rolling(window="3s", min_periods=1).kurt() + expected = df.copy() + expected["B"] = [np.nan] * 5 + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).kurt() + expected = df.copy() + expected["B"] = [np.nan] * 4 + [-1.2] + tm.assert_frame_equal(result, expected) + + def test_ragged_count(self, 
ragged): + df = ragged + result = df.rolling(window="1s", min_periods=1).count() + expected = df.copy() + expected["B"] = [1.0, 1, 1, 1, 1] + tm.assert_frame_equal(result, expected) + + df = ragged + result = df.rolling(window="1s").count() + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).count() + expected = df.copy() + expected["B"] = [1.0, 1, 2, 1, 2] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=2).count() + expected = df.copy() + expected["B"] = [np.nan, np.nan, 2, np.nan, 2] + tm.assert_frame_equal(result, expected) + + def test_regular_min(self): + df = DataFrame( + {"A": date_range("20130101", periods=5, freq="s"), "B": [0.0, 1, 2, 3, 4]} + ).set_index("A") + result = df.rolling("1s").min() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + df = DataFrame( + {"A": date_range("20130101", periods=5, freq="s"), "B": [5, 4, 3, 4, 5]} + ).set_index("A") + + tm.assert_frame_equal(result, expected) + result = df.rolling("2s").min() + expected = df.copy() + expected["B"] = [5.0, 4, 3, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling("5s").min() + expected = df.copy() + expected["B"] = [5.0, 4, 3, 3, 3] + tm.assert_frame_equal(result, expected) + + def test_ragged_min(self, ragged): + df = ragged + + result = df.rolling(window="1s", min_periods=1).min() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).min() + expected = df.copy() + expected["B"] = [0.0, 1, 1, 3, 3] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).min() + expected = df.copy() + expected["B"] = [0.0, 0, 0, 1, 1] + tm.assert_frame_equal(result, expected) + + def test_perf_min(self): + N = 10000 + + dfp = DataFrame( + {"B": np.random.default_rng(2).standard_normal(N)}, + 
index=date_range("20130101", periods=N, freq="s"), + ) + expected = dfp.rolling(2, min_periods=1).min() + result = dfp.rolling("2s").min() + assert ((result - expected) < 0.01).all().all() + + expected = dfp.rolling(200, min_periods=1).min() + result = dfp.rolling("200s").min() + assert ((result - expected) < 0.01).all().all() + + def test_ragged_max(self, ragged): + df = ragged + + result = df.rolling(window="1s", min_periods=1).max() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="2s", min_periods=1).max() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + result = df.rolling(window="5s", min_periods=1).max() + expected = df.copy() + expected["B"] = [0.0, 1, 2, 3, 4] + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "freq, op, result_data", + [ + ("ms", "min", [0.0] * 10), + ("ms", "mean", [0.0] * 9 + [2.0 / 9]), + ("ms", "max", [0.0] * 9 + [2.0]), + ("s", "min", [0.0] * 10), + ("s", "mean", [0.0] * 9 + [2.0 / 9]), + ("s", "max", [0.0] * 9 + [2.0]), + ("min", "min", [0.0] * 10), + ("min", "mean", [0.0] * 9 + [2.0 / 9]), + ("min", "max", [0.0] * 9 + [2.0]), + ("h", "min", [0.0] * 10), + ("h", "mean", [0.0] * 9 + [2.0 / 9]), + ("h", "max", [0.0] * 9 + [2.0]), + ("D", "min", [0.0] * 10), + ("D", "mean", [0.0] * 9 + [2.0 / 9]), + ("D", "max", [0.0] * 9 + [2.0]), + ], + ) + def test_freqs_ops(self, freq, op, result_data): + # GH 21096 + index = date_range(start="2018-1-1 01:00:00", freq=f"1{freq}", periods=10) + # Explicit cast to float to avoid implicit cast when setting nan + s = Series(data=0, index=index, dtype="float") + s.iloc[1] = np.nan + s.iloc[-1] = 2 + result = getattr(s.rolling(window=f"10{freq}"), op)() + expected = Series(data=result_data, index=index) + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "f", + [ + "sum", + "mean", + "count", + "median", + "std", + 
"var", + "kurt", + "skew", + "min", + "max", + ], + ) + def test_all(self, f, regular): + # simple comparison of integer vs time-based windowing + df = regular * 2 + er = df.rolling(window=1) + r = df.rolling(window="1s") + + result = getattr(r, f)() + expected = getattr(er, f)() + tm.assert_frame_equal(result, expected) + + result = r.quantile(0.5) + expected = er.quantile(0.5) + tm.assert_frame_equal(result, expected) + + def test_all2(self, arithmetic_win_operators): + f = arithmetic_win_operators + # more sophisticated comparison of integer vs. + # time-based windowing + df = DataFrame( + {"B": np.arange(50)}, index=date_range("20130101", periods=50, freq="h") + ) + # in-range data + dft = df.between_time("09:00", "16:00") + + r = dft.rolling(window="5h") + + result = getattr(r, f)() + + # we need to roll the days separately + # to compare with a time-based roll + # finally groupby-apply will return a multi-index + # so we need to drop the day + def agg_by_day(x): + x = x.between_time("09:00", "16:00") + return getattr(x.rolling(5, min_periods=1), f)() + + expected = ( + df.groupby(df.index.day).apply(agg_by_day).reset_index(level=0, drop=True) + ) + + tm.assert_frame_equal(result, expected) + + def test_rolling_cov_offset(self): + # GH16058 + + idx = date_range("2017-01-01", periods=24, freq="1h") + ss = Series(np.arange(len(idx)), index=idx) + + result = ss.rolling("2h").cov() + expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx) + tm.assert_series_equal(result, expected) + + expected2 = ss.rolling(2, min_periods=1).cov() + tm.assert_series_equal(result, expected2) + + result = ss.rolling("3h").cov() + expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx) + tm.assert_series_equal(result, expected) + + expected2 = ss.rolling(3, min_periods=1).cov() + tm.assert_series_equal(result, expected2) + + def test_rolling_on_decreasing_index(self, unit): + # GH-19248, GH-32385 + index = DatetimeIndex( + [ + Timestamp("20190101 09:00:30"), + 
Timestamp("20190101 09:00:27"), + Timestamp("20190101 09:00:20"), + Timestamp("20190101 09:00:18"), + Timestamp("20190101 09:00:10"), + ] + ).as_unit(unit) + + df = DataFrame({"column": [3, 4, 4, 5, 6]}, index=index) + result = df.rolling("5s").min() + expected = DataFrame({"column": [3.0, 3.0, 4.0, 4.0, 6.0]}, index=index) + tm.assert_frame_equal(result, expected) + + def test_rolling_on_empty(self): + # GH-32385 + df = DataFrame({"column": []}, index=[]) + result = df.rolling("5s").min() + expected = DataFrame({"column": []}, index=[]) + tm.assert_frame_equal(result, expected) + + def test_rolling_on_multi_index_level(self): + # GH-15584 + df = DataFrame( + {"column": range(6)}, + index=MultiIndex.from_product( + [date_range("20190101", periods=3), range(2)], names=["date", "seq"] + ), + ) + result = df.rolling("10d", on=df.index.get_level_values("date")).sum() + expected = DataFrame( + {"column": [0.0, 1.0, 3.0, 6.0, 10.0, 15.0]}, index=df.index + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("msg, axis", [["column", 1], ["index", 0]]) +def test_nat_axis_error(msg, axis): + idx = [Timestamp("2020"), NaT] + kwargs = {"columns" if axis == 1 else "index": idx} + df = DataFrame(np.eye(2), **kwargs) + warn_msg = "The 'axis' keyword in DataFrame.rolling is deprecated" + if axis == 1: + warn_msg = "Support for axis=1 in DataFrame.rolling is deprecated" + with pytest.raises(ValueError, match=f"{msg} values must not have NaT"): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + df.rolling("D", axis=axis).mean() + + +@td.skip_if_no("pyarrow") +def test_arrow_datetime_axis(): + # GH 55849 + expected = Series( + np.arange(5, dtype=np.float64), + index=Index( + date_range("2020-01-01", periods=5), dtype="timestamp[ns][pyarrow]" + ), + ) + result = expected.rolling("1D").sum() + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/window/test_win_type.py 
b/venv/lib/python3.10/site-packages/pandas/tests/window/test_win_type.py new file mode 100644 index 0000000000000000000000000000000000000000..5052019ddb7264c4f81e99ccdd79d88d86865ec4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/window/test_win_type.py @@ -0,0 +1,688 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, + Timedelta, + concat, + date_range, +) +import pandas._testing as tm +from pandas.api.indexers import BaseIndexer + + +@pytest.fixture( + params=[ + "triang", + "blackman", + "hamming", + "bartlett", + "bohman", + "blackmanharris", + "nuttall", + "barthann", + ] +) +def win_types(request): + return request.param + + +@pytest.fixture(params=["kaiser", "gaussian", "general_gaussian", "exponential"]) +def win_types_special(request): + return request.param + + +def test_constructor(frame_or_series): + # GH 12669 + pytest.importorskip("scipy") + c = frame_or_series(range(5)).rolling + + # valid + c(win_type="boxcar", window=2, min_periods=1) + c(win_type="boxcar", window=2, min_periods=1, center=True) + c(win_type="boxcar", window=2, min_periods=1, center=False) + + +@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])]) +def test_invalid_constructor(frame_or_series, w): + # not valid + pytest.importorskip("scipy") + c = frame_or_series(range(5)).rolling + with pytest.raises(ValueError, match="min_periods must be an integer"): + c(win_type="boxcar", window=2, min_periods=w) + with pytest.raises(ValueError, match="center must be a boolean"): + c(win_type="boxcar", window=2, min_periods=1, center=w) + + +@pytest.mark.parametrize("wt", ["foobar", 1]) +def test_invalid_constructor_wintype(frame_or_series, wt): + pytest.importorskip("scipy") + c = frame_or_series(range(5)).rolling + with pytest.raises(ValueError, match="Invalid win_type"): + c(win_type=wt, window=2) + + +def test_constructor_with_win_type(frame_or_series, win_types): + # GH 12669 + pytest.importorskip("scipy") + c = 
frame_or_series(range(5)).rolling + c(win_type=win_types, window=2) + + +@pytest.mark.parametrize("arg", ["median", "kurt", "skew"]) +def test_agg_function_support(arg): + pytest.importorskip("scipy") + df = DataFrame({"A": np.arange(5)}) + roll = df.rolling(2, win_type="triang") + + msg = f"'{arg}' is not a valid function for 'Window' object" + with pytest.raises(AttributeError, match=msg): + roll.agg(arg) + + with pytest.raises(AttributeError, match=msg): + roll.agg([arg]) + + with pytest.raises(AttributeError, match=msg): + roll.agg({"A": arg}) + + +def test_invalid_scipy_arg(): + # This error is raised by scipy + pytest.importorskip("scipy") + msg = r"boxcar\(\) got an unexpected" + with pytest.raises(TypeError, match=msg): + Series(range(3)).rolling(1, win_type="boxcar").mean(foo="bar") + + +def test_constructor_with_win_type_invalid(frame_or_series): + # GH 13383 + pytest.importorskip("scipy") + c = frame_or_series(range(5)).rolling + + msg = "window must be an integer 0 or greater" + + with pytest.raises(ValueError, match=msg): + c(-1, win_type="boxcar") + + +def test_window_with_args(step): + # make sure that we are aggregating window functions correctly with arg + pytest.importorskip("scipy") + r = Series(np.random.default_rng(2).standard_normal(100)).rolling( + window=10, min_periods=1, win_type="gaussian", step=step + ) + expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) + expected.columns = ["", ""] + result = r.aggregate([lambda x: x.mean(std=10), lambda x: x.mean(std=0.01)]) + tm.assert_frame_equal(result, expected) + + def a(x): + return x.mean(std=10) + + def b(x): + return x.mean(std=0.01) + + expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1) + expected.columns = ["a", "b"] + result = r.aggregate([a, b]) + tm.assert_frame_equal(result, expected) + + +def test_win_type_with_method_invalid(): + pytest.importorskip("scipy") + with pytest.raises( + NotImplementedError, match="'single' is the only supported method type." 
+ ): + Series(range(1)).rolling(1, win_type="triang", method="table") + + +@pytest.mark.parametrize("arg", [2000000000, "2s", Timedelta("2s")]) +def test_consistent_win_type_freq(arg): + # GH 15969 + pytest.importorskip("scipy") + s = Series(range(1)) + with pytest.raises(ValueError, match="Invalid win_type freq"): + s.rolling(arg, win_type="freq") + + +def test_win_type_freq_return_none(): + # GH 48838 + freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s") + assert freq_roll.win_type is None + + +def test_win_type_not_implemented(): + pytest.importorskip("scipy") + + class CustomIndexer(BaseIndexer): + def get_window_bounds(self, num_values, min_periods, center, closed, step): + return np.array([0, 1]), np.array([1, 2]) + + df = DataFrame({"values": range(2)}) + indexer = CustomIndexer() + with pytest.raises(NotImplementedError, match="BaseIndexer subclasses not"): + df.rolling(indexer, win_type="boxcar") + + +def test_cmov_mean(step): + # GH 8238 + pytest.importorskip("scipy") + vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) + result = Series(vals).rolling(5, center=True, step=step).mean() + expected_values = [ + np.nan, + np.nan, + 9.962, + 11.27, + 11.564, + 12.516, + 12.818, + 12.952, + np.nan, + np.nan, + ] + expected = Series(expected_values)[::step] + tm.assert_series_equal(expected, result) + + +def test_cmov_window(step): + # GH 8238 + pytest.importorskip("scipy") + vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) + result = Series(vals).rolling(5, win_type="boxcar", center=True, step=step).mean() + expected_values = [ + np.nan, + np.nan, + 9.962, + 11.27, + 11.564, + 12.516, + 12.818, + 12.952, + np.nan, + np.nan, + ] + expected = Series(expected_values)[::step] + tm.assert_series_equal(expected, result) + + +def test_cmov_window_corner(step): + # GH 8238 + # all nan + pytest.importorskip("scipy") + vals = Series([np.nan] * 10) + result = vals.rolling(5, 
center=True, win_type="boxcar", step=step).mean() + assert np.isnan(result).all() + + # empty + vals = Series([], dtype=object) + result = vals.rolling(5, center=True, win_type="boxcar", step=step).mean() + assert len(result) == 0 + + # shorter than window + vals = Series(np.random.default_rng(2).standard_normal(5)) + result = vals.rolling(10, win_type="boxcar", step=step).mean() + assert np.isnan(result).all() + assert len(result) == len(range(0, 5, step or 1)) + + +@pytest.mark.parametrize( + "f,xp", + [ + ( + "mean", + [ + [np.nan, np.nan], + [np.nan, np.nan], + [9.252, 9.392], + [8.644, 9.906], + [8.87, 10.208], + [6.81, 8.588], + [7.792, 8.644], + [9.05, 7.824], + [np.nan, np.nan], + [np.nan, np.nan], + ], + ), + ( + "std", + [ + [np.nan, np.nan], + [np.nan, np.nan], + [3.789706, 4.068313], + [3.429232, 3.237411], + [3.589269, 3.220810], + [3.405195, 2.380655], + [3.281839, 2.369869], + [3.676846, 1.801799], + [np.nan, np.nan], + [np.nan, np.nan], + ], + ), + ( + "var", + [ + [np.nan, np.nan], + [np.nan, np.nan], + [14.36187, 16.55117], + [11.75963, 10.48083], + [12.88285, 10.37362], + [11.59535, 5.66752], + [10.77047, 5.61628], + [13.51920, 3.24648], + [np.nan, np.nan], + [np.nan, np.nan], + ], + ), + ( + "sum", + [ + [np.nan, np.nan], + [np.nan, np.nan], + [46.26, 46.96], + [43.22, 49.53], + [44.35, 51.04], + [34.05, 42.94], + [38.96, 43.22], + [45.25, 39.12], + [np.nan, np.nan], + [np.nan, np.nan], + ], + ), + ], +) +def test_cmov_window_frame(f, xp, step): + # Gh 8238 + pytest.importorskip("scipy") + df = DataFrame( + np.array( + [ + [12.18, 3.64], + [10.18, 9.16], + [13.24, 14.61], + [4.51, 8.11], + [6.15, 11.44], + [9.14, 6.21], + [11.31, 10.67], + [2.94, 6.51], + [9.42, 8.39], + [12.44, 7.34], + ] + ) + ) + xp = DataFrame(np.array(xp))[::step] + + roll = df.rolling(5, win_type="boxcar", center=True, step=step) + rs = getattr(roll, f)() + + tm.assert_frame_equal(xp, rs) + + +@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4, 5]) +def 
test_cmov_window_na_min_periods(step, min_periods): + pytest.importorskip("scipy") + vals = Series(np.random.default_rng(2).standard_normal(10)) + vals[4] = np.nan + vals[8] = np.nan + + xp = vals.rolling(5, min_periods=min_periods, center=True, step=step).mean() + rs = vals.rolling( + 5, win_type="boxcar", min_periods=min_periods, center=True, step=step + ).mean() + tm.assert_series_equal(xp, rs) + + +def test_cmov_window_regular(win_types, step): + # GH 8238 + pytest.importorskip("scipy") + vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) + xps = { + "hamming": [ + np.nan, + np.nan, + 8.71384, + 9.56348, + 12.38009, + 14.03687, + 13.8567, + 11.81473, + np.nan, + np.nan, + ], + "triang": [ + np.nan, + np.nan, + 9.28667, + 10.34667, + 12.00556, + 13.33889, + 13.38, + 12.33667, + np.nan, + np.nan, + ], + "barthann": [ + np.nan, + np.nan, + 8.4425, + 9.1925, + 12.5575, + 14.3675, + 14.0825, + 11.5675, + np.nan, + np.nan, + ], + "bohman": [ + np.nan, + np.nan, + 7.61599, + 9.1764, + 12.83559, + 14.17267, + 14.65923, + 11.10401, + np.nan, + np.nan, + ], + "blackmanharris": [ + np.nan, + np.nan, + 6.97691, + 9.16438, + 13.05052, + 14.02156, + 15.10512, + 10.74574, + np.nan, + np.nan, + ], + "nuttall": [ + np.nan, + np.nan, + 7.04618, + 9.16786, + 13.02671, + 14.03559, + 15.05657, + 10.78514, + np.nan, + np.nan, + ], + "blackman": [ + np.nan, + np.nan, + 7.73345, + 9.17869, + 12.79607, + 14.20036, + 14.57726, + 11.16988, + np.nan, + np.nan, + ], + "bartlett": [ + np.nan, + np.nan, + 8.4425, + 9.1925, + 12.5575, + 14.3675, + 14.0825, + 11.5675, + np.nan, + np.nan, + ], + } + + xp = Series(xps[win_types])[::step] + rs = Series(vals).rolling(5, win_type=win_types, center=True, step=step).mean() + tm.assert_series_equal(xp, rs) + + +def test_cmov_window_regular_linear_range(win_types, step): + # GH 8238 + pytest.importorskip("scipy") + vals = np.array(range(10), dtype=float) + xp = vals.copy() + xp[:2] = np.nan + xp[-2:] = np.nan + xp = 
Series(xp)[::step] + + rs = Series(vals).rolling(5, win_type=win_types, center=True, step=step).mean() + tm.assert_series_equal(xp, rs) + + +def test_cmov_window_regular_missing_data(win_types, step): + # GH 8238 + pytest.importorskip("scipy") + vals = np.array( + [6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48] + ) + xps = { + "bartlett": [ + np.nan, + np.nan, + 9.70333, + 10.5225, + 8.4425, + 9.1925, + 12.5575, + 14.3675, + 15.61667, + 13.655, + ], + "blackman": [ + np.nan, + np.nan, + 9.04582, + 11.41536, + 7.73345, + 9.17869, + 12.79607, + 14.20036, + 15.8706, + 13.655, + ], + "barthann": [ + np.nan, + np.nan, + 9.70333, + 10.5225, + 8.4425, + 9.1925, + 12.5575, + 14.3675, + 15.61667, + 13.655, + ], + "bohman": [ + np.nan, + np.nan, + 8.9444, + 11.56327, + 7.61599, + 9.1764, + 12.83559, + 14.17267, + 15.90976, + 13.655, + ], + "hamming": [ + np.nan, + np.nan, + 9.59321, + 10.29694, + 8.71384, + 9.56348, + 12.38009, + 14.20565, + 15.24694, + 13.69758, + ], + "nuttall": [ + np.nan, + np.nan, + 8.47693, + 12.2821, + 7.04618, + 9.16786, + 13.02671, + 14.03673, + 16.08759, + 13.65553, + ], + "triang": [ + np.nan, + np.nan, + 9.33167, + 9.76125, + 9.28667, + 10.34667, + 12.00556, + 13.82125, + 14.49429, + 13.765, + ], + "blackmanharris": [ + np.nan, + np.nan, + 8.42526, + 12.36824, + 6.97691, + 9.16438, + 13.05052, + 14.02175, + 16.1098, + 13.65509, + ], + } + + xp = Series(xps[win_types])[::step] + rs = Series(vals).rolling(5, win_type=win_types, min_periods=3, step=step).mean() + tm.assert_series_equal(xp, rs) + + +def test_cmov_window_special(win_types_special, step): + # GH 8238 + pytest.importorskip("scipy") + kwds = { + "kaiser": {"beta": 1.0}, + "gaussian": {"std": 1.0}, + "general_gaussian": {"p": 2.0, "sig": 2.0}, + "exponential": {"tau": 10}, + } + + vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]) + + xps = { + "gaussian": [ + np.nan, + np.nan, + 8.97297, + 9.76077, + 12.24763, + 13.89053, + 13.65671, 
+ 12.01002, + np.nan, + np.nan, + ], + "general_gaussian": [ + np.nan, + np.nan, + 9.85011, + 10.71589, + 11.73161, + 13.08516, + 12.95111, + 12.74577, + np.nan, + np.nan, + ], + "kaiser": [ + np.nan, + np.nan, + 9.86851, + 11.02969, + 11.65161, + 12.75129, + 12.90702, + 12.83757, + np.nan, + np.nan, + ], + "exponential": [ + np.nan, + np.nan, + 9.83364, + 11.10472, + 11.64551, + 12.66138, + 12.92379, + 12.83770, + np.nan, + np.nan, + ], + } + + xp = Series(xps[win_types_special])[::step] + rs = ( + Series(vals) + .rolling(5, win_type=win_types_special, center=True, step=step) + .mean(**kwds[win_types_special]) + ) + tm.assert_series_equal(xp, rs) + + +def test_cmov_window_special_linear_range(win_types_special, step): + # GH 8238 + pytest.importorskip("scipy") + kwds = { + "kaiser": {"beta": 1.0}, + "gaussian": {"std": 1.0}, + "general_gaussian": {"p": 2.0, "sig": 2.0}, + "slepian": {"width": 0.5}, + "exponential": {"tau": 10}, + } + + vals = np.array(range(10), dtype=float) + xp = vals.copy() + xp[:2] = np.nan + xp[-2:] = np.nan + xp = Series(xp)[::step] + + rs = ( + Series(vals) + .rolling(5, win_type=win_types_special, center=True, step=step) + .mean(**kwds[win_types_special]) + ) + tm.assert_series_equal(xp, rs) + + +def test_weighted_var_big_window_no_segfault(win_types, center): + # GitHub Issue #46772 + pytest.importorskip("scipy") + x = Series(0) + result = x.rolling(window=16, center=center, win_type=win_types).var() + expected = Series(np.nan) + + tm.assert_series_equal(result, expected) + + +def test_rolling_center_axis_1(): + pytest.importorskip("scipy") + df = DataFrame( + {"a": [1, 1, 0, 0, 0, 1], "b": [1, 0, 0, 1, 0, 0], "c": [1, 0, 0, 1, 0, 1]} + ) + + msg = "Support for axis=1 in DataFrame.rolling is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.rolling(window=3, axis=1, win_type="boxcar", center=True).sum() + + expected = DataFrame( + {"a": [np.nan] * 6, "b": [3.0, 1.0, 0.0, 2.0, 0.0, 2.0], "c": [np.nan] * 
6} + ) + + tm.assert_frame_equal(result, expected, check_dtype=True)