diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_13_mp_rank_02_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_13_mp_rank_02_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..5ab5e270509ec3778fae39e248cfdd074471555d --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_13_mp_rank_02_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c750018c2e17fc64bedda06f49a605de1205b9816105b4c1722fc13c1fedb304 +size 41830330 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_184_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_184_mp_rank_03_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..f7d9d78334339123cc717c2f4a6eb2b60147adf4 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_184_mp_rank_03_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6351546e4ec255305c11fdda0e2dbc59c084b16e834684d4f39785287846e8b +size 41830340 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_217_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_217_mp_rank_01_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..6c9bb6265ad6875928adcd7a777141deddf2073b --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_217_mp_rank_01_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74d1c34395045bd259106a2b2aff837960ca1cabf3471137988d0c3a1ef9a5c9 +size 41830148 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_35_mp_rank_01_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_35_mp_rank_01_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..ffc0b821080354a2a160d5e2d128a3260d1c4564 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_35_mp_rank_01_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:185700df1bd72fef4ff2c39c043aee39beb49b0a59569ecf30566b1c1a4d0465 +size 41830138 diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53ce98c1f5e8b89bbfa2e2daf2c0d1499ece50f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/__pycache__/_core.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/__pycache__/_core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..baf86a1bbf346369017c7dd0605f73a4394553d4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/__pycache__/_core.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__init__.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..75c61da03795af0d4f60cd4d4a8b8e0dd45e3d5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__init__.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas.plotting._matplotlib.boxplot import ( + BoxPlot, + boxplot, + boxplot_frame, + boxplot_frame_groupby, +) +from 
pandas.plotting._matplotlib.converter import ( + deregister, + register, +) +from pandas.plotting._matplotlib.core import ( + AreaPlot, + BarhPlot, + BarPlot, + HexBinPlot, + LinePlot, + PiePlot, + ScatterPlot, +) +from pandas.plotting._matplotlib.hist import ( + HistPlot, + KdePlot, + hist_frame, + hist_series, +) +from pandas.plotting._matplotlib.misc import ( + andrews_curves, + autocorrelation_plot, + bootstrap_plot, + lag_plot, + parallel_coordinates, + radviz, + scatter_matrix, +) +from pandas.plotting._matplotlib.tools import table + +if TYPE_CHECKING: + from pandas.plotting._matplotlib.core import MPLPlot + +PLOT_CLASSES: dict[str, type[MPLPlot]] = { + "line": LinePlot, + "bar": BarPlot, + "barh": BarhPlot, + "box": BoxPlot, + "hist": HistPlot, + "kde": KdePlot, + "area": AreaPlot, + "pie": PiePlot, + "scatter": ScatterPlot, + "hexbin": HexBinPlot, +} + + +def plot(data, kind, **kwargs): + # Importing pyplot at the top of the file (before the converters are + # registered) causes problems in matplotlib 2 (converters seem to not + # work) + import matplotlib.pyplot as plt + + if kwargs.pop("reuse_plot", False): + ax = kwargs.get("ax") + if ax is None and len(plt.get_fignums()) > 0: + with plt.rc_context(): + ax = plt.gca() + kwargs["ax"] = getattr(ax, "left_ax", ax) + plot_obj = PLOT_CLASSES[kind](data, **kwargs) + plot_obj.generate() + plot_obj.draw() + return plot_obj.result + + +__all__ = [ + "plot", + "hist_series", + "hist_frame", + "boxplot", + "boxplot_frame", + "boxplot_frame_groupby", + "table", + "andrews_curves", + "autocorrelation_plot", + "bootstrap_plot", + "lag_plot", + "parallel_coordinates", + "radviz", + "scatter_matrix", + "register", + "deregister", +] diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbd9e14dec83873622b48266a71c34edbacbb8e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/boxplot.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/boxplot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e154ef8588e0ef99e91fa0e231024a6686a406d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/boxplot.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/converter.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/converter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb2c441320ac25fe6b5d8d7c77e17cb7a594df8f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/converter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/core.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2bbfd4b8d45f55a08a0acd37b0b07087b339932 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/core.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/groupby.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/groupby.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8293aa1ec7a2c742b4ae51988f08d6efdfb4407 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/groupby.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/hist.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/hist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56b154c136388c4055d61e3523f85425787f4fce Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/hist.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/misc.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b32e93a13c146c419407933617c307e9605dde7d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/misc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/style.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f09796af6e03be6919dabca68fe295b818be1070 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/style.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/timeseries.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/timeseries.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2f0e6dc125fa805f969eaf92914dbaa8976e04f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/timeseries.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/tools.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/tools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e2ff11c420ef30c2e9db7114b3d1a65134e1782 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/__pycache__/tools.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/boxplot.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/boxplot.py new file mode 100644 index 0000000000000000000000000000000000000000..d2b76decaa75d3e7adc9792763db4e276e514ff1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/boxplot.py @@ -0,0 +1,572 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, + NamedTuple, +) +import warnings + +from matplotlib.artist import setp +import numpy as np + +from pandas._libs import lib +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import is_dict_like +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.missing import 
remove_na_arraylike + +import pandas as pd +import pandas.core.common as com + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.core import ( + LinePlot, + MPLPlot, +) +from pandas.plotting._matplotlib.groupby import create_iter_data_given_by +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import ( + create_subplots, + flatten_axes, + maybe_adjust_figure, +) + +if TYPE_CHECKING: + from collections.abc import Collection + + from matplotlib.axes import Axes + from matplotlib.figure import Figure + from matplotlib.lines import Line2D + + from pandas._typing import MatplotlibColor + + +def _set_ticklabels(ax: Axes, labels: list[str], is_vertical: bool, **kwargs) -> None: + """Set the tick labels of a given axis. + + Due to https://github.com/matplotlib/matplotlib/pull/17266, we need to handle the + case of repeated ticks (due to `FixedLocator`) and thus we duplicate the number of + labels. + """ + ticks = ax.get_xticks() if is_vertical else ax.get_yticks() + if len(ticks) != len(labels): + i, remainder = divmod(len(ticks), len(labels)) + assert remainder == 0, remainder + labels *= i + if is_vertical: + ax.set_xticklabels(labels, **kwargs) + else: + ax.set_yticklabels(labels, **kwargs) + + +class BoxPlot(LinePlot): + @property + def _kind(self) -> Literal["box"]: + return "box" + + _layout_type = "horizontal" + + _valid_return_types = (None, "axes", "dict", "both") + + class BP(NamedTuple): + # namedtuple to hold results + ax: Axes + lines: dict[str, list[Line2D]] + + def __init__(self, data, return_type: str = "axes", **kwargs) -> None: + if return_type not in self._valid_return_types: + raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}") + + self.return_type = return_type + # Do not call LinePlot.__init__ which may fill nan + MPLPlot.__init__(self, data, **kwargs) # pylint: disable=non-parent-init-called + + if self.subplots: + # Disable label ax sharing. Otherwise, all subplots shows last + # column label + if self.orientation == "vertical": + self.sharex = False + else: + self.sharey = False + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, ax: Axes, y: np.ndarray, column_num=None, return_type: str = "axes", **kwds + ): + ys: np.ndarray | list[np.ndarray] + if y.ndim == 2: + ys = [remove_na_arraylike(v) for v in y] + # Boxplot fails with empty arrays, so need to add a NaN + # if any cols are empty + # GH 8181 + ys = [v if v.size > 0 else np.array([np.nan]) for v in ys] + else: + ys = remove_na_arraylike(y) + bp = ax.boxplot(ys, **kwds) + + if return_type == "dict": + return bp, bp + elif return_type == "both": + return cls.BP(ax=ax, lines=bp), bp + else: + return ax, bp + + def _validate_color_args(self, color, colormap): + if color is lib.no_default: + return None + + if colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used " + "simultaneously. Using 'color'", + stacklevel=find_stack_level(), + ) + + if isinstance(color, dict): + valid_keys = ["boxes", "whiskers", "medians", "caps"] + for key in color: + if key not in valid_keys: + raise ValueError( + f"color dict contains invalid key '{key}'. 
" + f"The key must be either {valid_keys}" + ) + return color + + @cache_readonly + def _color_attrs(self): + # get standard colors for default + # use 2 colors by default, for box/whisker and median + # flier colors isn't needed here + # because it can be specified by ``sym`` kw + return get_standard_colors(num_colors=3, colormap=self.colormap, color=None) + + @cache_readonly + def _boxes_c(self): + return self._color_attrs[0] + + @cache_readonly + def _whiskers_c(self): + return self._color_attrs[0] + + @cache_readonly + def _medians_c(self): + return self._color_attrs[2] + + @cache_readonly + def _caps_c(self): + return self._color_attrs[0] + + def _get_colors( + self, + num_colors=None, + color_kwds: dict[str, MatplotlibColor] + | MatplotlibColor + | Collection[MatplotlibColor] + | None = "color", + ) -> None: + pass + + def maybe_color_bp(self, bp) -> None: + if isinstance(self.color, dict): + boxes = self.color.get("boxes", self._boxes_c) + whiskers = self.color.get("whiskers", self._whiskers_c) + medians = self.color.get("medians", self._medians_c) + caps = self.color.get("caps", self._caps_c) + else: + # Other types are forwarded to matplotlib + # If None, use default colors + boxes = self.color or self._boxes_c + whiskers = self.color or self._whiskers_c + medians = self.color or self._medians_c + caps = self.color or self._caps_c + + color_tup = (boxes, whiskers, medians, caps) + maybe_color_bp(bp, color_tup=color_tup, **self.kwds) + + def _make_plot(self, fig: Figure) -> None: + if self.subplots: + self._return_obj = pd.Series(dtype=object) + + # Re-create iterated data if `by` is assigned by users + data = ( + create_iter_data_given_by(self.data, self._kind) + if self.by is not None + else self.data + ) + + # error: Argument "data" to "_iter_data" of "MPLPlot" has + # incompatible type "object"; expected "DataFrame | + # dict[Hashable, Series | DataFrame]" + for i, (label, y) in enumerate(self._iter_data(data=data)): # type: ignore[arg-type] + ax = self._get_ax(i) + kwds = self.kwds.copy() + + # When by is applied, show title for subplots to know which group it is + # just like df.boxplot, and need to apply T on y to provide right input + if self.by is not None: + y = y.T + ax.set_title(pprint_thing(label)) + + # When `by` is assigned, the ticklabels will become unique grouped + # values, instead of label which is used as subtitle in this case. + # error: "Index" has no attribute "levels"; maybe "nlevels"? 
+ levels = self.data.columns.levels # type: ignore[attr-defined] + ticklabels = [pprint_thing(col) for col in levels[0]] + else: + ticklabels = [pprint_thing(label)] + + ret, bp = self._plot( + ax, y, column_num=i, return_type=self.return_type, **kwds + ) + self.maybe_color_bp(bp) + self._return_obj[label] = ret + _set_ticklabels( + ax=ax, labels=ticklabels, is_vertical=self.orientation == "vertical" + ) + else: + y = self.data.values.T + ax = self._get_ax(0) + kwds = self.kwds.copy() + + ret, bp = self._plot( + ax, y, column_num=0, return_type=self.return_type, **kwds + ) + self.maybe_color_bp(bp) + self._return_obj = ret + + labels = [pprint_thing(left) for left in self.data.columns] + if not self.use_index: + labels = [pprint_thing(key) for key in range(len(labels))] + _set_ticklabels( + ax=ax, labels=labels, is_vertical=self.orientation == "vertical" + ) + + def _make_legend(self) -> None: + pass + + def _post_plot_logic(self, ax: Axes, data) -> None: + # GH 45465: make sure that the boxplot doesn't ignore xlabel/ylabel + if self.xlabel: + ax.set_xlabel(pprint_thing(self.xlabel)) + if self.ylabel: + ax.set_ylabel(pprint_thing(self.ylabel)) + + @property + def orientation(self) -> Literal["horizontal", "vertical"]: + if self.kwds.get("vert", True): + return "vertical" + else: + return "horizontal" + + @property + def result(self): + if self.return_type is None: + return super().result + else: + return self._return_obj + + +def maybe_color_bp(bp, color_tup, **kwds) -> None: + # GH#30346, when users specifying those arguments explicitly, our defaults + # for these four kwargs should be overridden; if not, use Pandas settings + if not kwds.get("boxprops"): + setp(bp["boxes"], color=color_tup[0], alpha=1) + if not kwds.get("whiskerprops"): + setp(bp["whiskers"], color=color_tup[1], alpha=1) + if not kwds.get("medianprops"): + setp(bp["medians"], color=color_tup[2], alpha=1) + if not kwds.get("capprops"): + setp(bp["caps"], color=color_tup[3], alpha=1) + + +def _grouped_plot_by_column( + plotf, + data, + columns=None, + by=None, + numeric_only: bool = True, + grid: bool = False, + figsize: tuple[float, float] | None = None, + ax=None, + layout=None, + return_type=None, + **kwargs, +): + grouped = data.groupby(by, observed=False) + if columns is None: + if not isinstance(by, (list, tuple)): + by = [by] + columns = data._get_numeric_data().columns.difference(by) + naxes = len(columns) + fig, axes = create_subplots( + naxes=naxes, + sharex=kwargs.pop("sharex", True), + sharey=kwargs.pop("sharey", True), + figsize=figsize, + ax=ax, + layout=layout, + ) + + _axes = flatten_axes(axes) + + # GH 45465: move the "by" label based on "vert" + xlabel, ylabel = kwargs.pop("xlabel", None), kwargs.pop("ylabel", None) + if kwargs.get("vert", True): + xlabel = xlabel or by + else: + ylabel = ylabel or by + + ax_values = [] + + for i, col in enumerate(columns): + ax = _axes[i] + gp_col = grouped[col] + keys, values = zip(*gp_col) + re_plotf = plotf(keys, values, ax, xlabel=xlabel, ylabel=ylabel, **kwargs) + ax.set_title(col) + ax_values.append(re_plotf) + ax.grid(grid) + + result = pd.Series(ax_values, index=columns, copy=False) + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type is None: + result = axes + + byline = by[0] if len(by) == 1 else by + fig.suptitle(f"Boxplot grouped by {byline}") + maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) + + return result + + +def boxplot( + data, + column=None, + by=None, + ax=None, + fontsize: int | None = 
None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout=None, + return_type=None, + **kwds, +): + import matplotlib.pyplot as plt + + # validate return_type: + if return_type not in BoxPlot._valid_return_types: + raise ValueError("return_type must be {'axes', 'dict', 'both'}") + + if isinstance(data, ABCSeries): + data = data.to_frame("x") + column = "x" + + def _get_colors(): + # num_colors=3 is required as method maybe_color_bp takes the colors + # in positions 0 and 2. + # if colors not provided, use same defaults as DataFrame.plot.box + result = get_standard_colors(num_colors=3) + result = np.take(result, [0, 0, 2]) + result = np.append(result, "k") + + colors = kwds.pop("color", None) + if colors: + if is_dict_like(colors): + # replace colors in result array with user-specified colors + # taken from the colors dict parameter + # "boxes" value placed in position 0, "whiskers" in 1, etc. + valid_keys = ["boxes", "whiskers", "medians", "caps"] + key_to_index = dict(zip(valid_keys, range(4))) + for key, value in colors.items(): + if key in valid_keys: + result[key_to_index[key]] = value + else: + raise ValueError( + f"color dict contains invalid key '{key}'. " + f"The key must be either {valid_keys}" + ) + else: + result.fill(colors) + + return result + + def plot_group(keys, values, ax: Axes, **kwds): + # GH 45465: xlabel/ylabel need to be popped out before plotting happens + xlabel, ylabel = kwds.pop("xlabel", None), kwds.pop("ylabel", None) + if xlabel: + ax.set_xlabel(pprint_thing(xlabel)) + if ylabel: + ax.set_ylabel(pprint_thing(ylabel)) + + keys = [pprint_thing(x) for x in keys] + values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values] + bp = ax.boxplot(values, **kwds) + if fontsize is not None: + ax.tick_params(axis="both", labelsize=fontsize) + + # GH 45465: x/y are flipped when "vert" changes + _set_ticklabels( + ax=ax, labels=keys, is_vertical=kwds.get("vert", True), rotation=rot + ) + maybe_color_bp(bp, color_tup=colors, **kwds) + + # Return axes in multiplot case, maybe revisit later # 985 + if return_type == "dict": + return bp + elif return_type == "both": + return BoxPlot.BP(ax=ax, lines=bp) + else: + return ax + + colors = _get_colors() + if column is None: + columns = None + elif isinstance(column, (list, tuple)): + columns = column + else: + columns = [column] + + if by is not None: + # Prefer array return type for 2-D plots to match the subplot layout + # https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580 + result = _grouped_plot_by_column( + plot_group, + data, + columns=columns, + by=by, + grid=grid, + figsize=figsize, + ax=ax, + layout=layout, + return_type=return_type, + **kwds, + ) + else: + if return_type is None: + return_type = "axes" + if layout is not None: + raise ValueError("The 'layout' keyword is not supported when 'by' is None") + + if ax is None: + rc = {"figure.figsize": figsize} if figsize is not None else {} + with plt.rc_context(rc): + ax = plt.gca() + data = data._get_numeric_data() + naxes = len(data.columns) + if naxes == 0: + raise ValueError( + "boxplot method requires numerical columns, nothing to plot." 
+ ) + if columns is None: + columns = data.columns + else: + data = data[columns] + + result = plot_group(columns, data.values.T, ax, **kwds) + ax.grid(grid) + + return result + + +def boxplot_frame( + self, + column=None, + by=None, + ax=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout=None, + return_type=None, + **kwds, +): + import matplotlib.pyplot as plt + + ax = boxplot( + self, + column=column, + by=by, + ax=ax, + fontsize=fontsize, + grid=grid, + rot=rot, + figsize=figsize, + layout=layout, + return_type=return_type, + **kwds, + ) + plt.draw_if_interactive() + return ax + + +def boxplot_frame_groupby( + grouped, + subplots: bool = True, + column=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + ax=None, + figsize: tuple[float, float] | None = None, + layout=None, + sharex: bool = False, + sharey: bool = True, + **kwds, +): + if subplots is True: + naxes = len(grouped) + fig, axes = create_subplots( + naxes=naxes, + squeeze=False, + ax=ax, + sharex=sharex, + sharey=sharey, + figsize=figsize, + layout=layout, + ) + axes = flatten_axes(axes) + + ret = pd.Series(dtype=object) + + for (key, group), ax in zip(grouped, axes): + d = group.boxplot( + ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds + ) + ax.set_title(pprint_thing(key)) + ret.loc[key] = d + maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) + else: + keys, frames = zip(*grouped) + if grouped.axis == 0: + df = pd.concat(frames, keys=keys, axis=1) + elif len(frames) > 1: + df = frames[0].join(frames[1::]) + else: + df = frames[0] + + # GH 16748, DataFrameGroupby fails when subplots=False and `column` argument + # is assigned, and in this case, since `df` here becomes MI after groupby, + # so we need to couple the keys (grouped values) and column (original df + # column) together to search for subset to plot + if column is not None: + column = com.convert_to_list_like(column) + multi_key = pd.MultiIndex.from_product([keys, column]) + column = list(multi_key.values) + ret = df.boxplot( + column=column, + fontsize=fontsize, + rot=rot, + grid=grid, + ax=ax, + figsize=figsize, + layout=layout, + **kwds, + ) + return ret diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/converter.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/converter.py new file mode 100644 index 0000000000000000000000000000000000000000..0eb3318ac96c5589b01b7d891d8b0234072160f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/converter.py @@ -0,0 +1,1135 @@ +from __future__ import annotations + +import contextlib +import datetime as pydt +from datetime import ( + datetime, + timedelta, + tzinfo, +) +import functools +from typing import ( + TYPE_CHECKING, + Any, + cast, +) +import warnings + +import matplotlib.dates as mdates +from matplotlib.ticker import ( + AutoLocator, + Formatter, + Locator, +) +from matplotlib.transforms import nonsingular +import matplotlib.units as munits +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs import ( + Timestamp, + to_offset, +) +from pandas._libs.tslibs.dtypes import ( + FreqGroup, + periods_per_day, +) +from pandas._typing import ( + F, + npt, +) + +from pandas.core.dtypes.common import ( + is_float, + is_float_dtype, + is_integer, + is_integer_dtype, + is_nested_list_like, +) + +from pandas import ( + Index, + Series, + get_option, +) +import pandas.core.common as com 
+from pandas.core.indexes.datetimes import date_range +from pandas.core.indexes.period import ( + Period, + PeriodIndex, + period_range, +) +import pandas.core.tools.datetimes as tools + +if TYPE_CHECKING: + from collections.abc import Generator + + from matplotlib.axis import Axis + + from pandas._libs.tslibs.offsets import BaseOffset + + +_mpl_units = {} # Cache for units overwritten by us + + +def get_pairs(): + pairs = [ + (Timestamp, DatetimeConverter), + (Period, PeriodConverter), + (pydt.datetime, DatetimeConverter), + (pydt.date, DatetimeConverter), + (pydt.time, TimeConverter), + (np.datetime64, DatetimeConverter), + ] + return pairs + + +def register_pandas_matplotlib_converters(func: F) -> F: + """ + Decorator applying pandas_converters. + """ + + @functools.wraps(func) + def wrapper(*args, **kwargs): + with pandas_converters(): + return func(*args, **kwargs) + + return cast(F, wrapper) + + +@contextlib.contextmanager +def pandas_converters() -> Generator[None, None, None]: + """ + Context manager registering pandas' converters for a plot. + + See Also + -------- + register_pandas_matplotlib_converters : Decorator that applies this. + """ + value = get_option("plotting.matplotlib.register_converters") + + if value: + # register for True or "auto" + register() + try: + yield + finally: + if value == "auto": + # only deregister for "auto" + deregister() + + +def register() -> None: + pairs = get_pairs() + for type_, cls in pairs: + # Cache previous converter if present + if type_ in munits.registry and not isinstance(munits.registry[type_], cls): + previous = munits.registry[type_] + _mpl_units[type_] = previous + # Replace with pandas converter + munits.registry[type_] = cls() + + +def deregister() -> None: + # Renamed in pandas.plotting.__init__ + for type_, cls in get_pairs(): + # We use type to catch our classes directly, no inheritance + if type(munits.registry.get(type_)) is cls: + munits.registry.pop(type_) + + # restore the old keys + for unit, formatter in _mpl_units.items(): + if type(formatter) not in {DatetimeConverter, PeriodConverter, TimeConverter}: + # make it idempotent by excluding ours. + munits.registry[unit] = formatter + + +def _to_ordinalf(tm: pydt.time) -> float: + tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10**6 + return tot_sec + + +def time2num(d): + if isinstance(d, str): + parsed = Timestamp(d) + return _to_ordinalf(parsed.time()) + if isinstance(d, pydt.time): + return _to_ordinalf(d) + return d + + +class TimeConverter(munits.ConversionInterface): + @staticmethod + def convert(value, unit, axis): + valid_types = (str, pydt.time) + if isinstance(value, valid_types) or is_integer(value) or is_float(value): + return time2num(value) + if isinstance(value, Index): + return value.map(time2num) + if isinstance(value, (list, tuple, np.ndarray, Index)): + return [time2num(x) for x in value] + return value + + @staticmethod + def axisinfo(unit, axis) -> munits.AxisInfo | None: + if unit != "time": + return None + + majloc = AutoLocator() + majfmt = TimeFormatter(majloc) + return munits.AxisInfo(majloc=majloc, majfmt=majfmt, label="time") + + @staticmethod + def default_units(x, axis) -> str: + return "time" + + +# time formatter +class TimeFormatter(Formatter): + def __init__(self, locs) -> None: + self.locs = locs + + def __call__(self, x, pos: int | None = 0) -> str: + """ + Return the time of day as a formatted string. 
+ + Parameters + ---------- + x : float + The time of day specified as seconds since 00:00 (midnight), + with up to microsecond precision. + pos + Unused + + Returns + ------- + str + A string in HH:MM:SS.mmmuuu format. Microseconds, + milliseconds and seconds are only displayed if non-zero. + """ + fmt = "%H:%M:%S.%f" + s = int(x) + msus = round((x - s) * 10**6) + ms = msus // 1000 + us = msus % 1000 + m, s = divmod(s, 60) + h, m = divmod(m, 60) + _, h = divmod(h, 24) + if us != 0: + return pydt.time(h, m, s, msus).strftime(fmt) + elif ms != 0: + return pydt.time(h, m, s, msus).strftime(fmt)[:-3] + elif s != 0: + return pydt.time(h, m, s).strftime("%H:%M:%S") + + return pydt.time(h, m).strftime("%H:%M") + + +# Period Conversion + + +class PeriodConverter(mdates.DateConverter): + @staticmethod + def convert(values, units, axis): + if is_nested_list_like(values): + values = [PeriodConverter._convert_1d(v, units, axis) for v in values] + else: + values = PeriodConverter._convert_1d(values, units, axis) + return values + + @staticmethod + def _convert_1d(values, units, axis): + if not hasattr(axis, "freq"): + raise TypeError("Axis must have `freq` set to convert to Periods") + valid_types = (str, datetime, Period, pydt.date, pydt.time, np.datetime64) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning + ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + if ( + isinstance(values, valid_types) + or is_integer(values) + or is_float(values) + ): + return get_datevalue(values, axis.freq) + elif isinstance(values, PeriodIndex): + return values.asfreq(axis.freq).asi8 + elif isinstance(values, Index): + return values.map(lambda x: get_datevalue(x, axis.freq)) + elif lib.infer_dtype(values, skipna=False) == "period": + # https://github.com/pandas-dev/pandas/issues/24304 + # convert ndarray[period] -> PeriodIndex + return PeriodIndex(values, freq=axis.freq).asi8 + elif isinstance(values, (list, tuple, np.ndarray, Index)): + return [get_datevalue(x, axis.freq) for x in values] + return values + + +def get_datevalue(date, freq): + if isinstance(date, Period): + return date.asfreq(freq).ordinal + elif isinstance(date, (str, datetime, pydt.date, pydt.time, np.datetime64)): + return Period(date, freq).ordinal + elif ( + is_integer(date) + or is_float(date) + or (isinstance(date, (np.ndarray, Index)) and (date.size == 1)) + ): + return date + elif date is None: + return None + raise ValueError(f"Unrecognizable date '{date}'") + + +# Datetime Conversion +class DatetimeConverter(mdates.DateConverter): + @staticmethod + def convert(values, unit, axis): + # values might be a 1-d array, or a list-like of arrays. 
+ if is_nested_list_like(values): + values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values] + else: + values = DatetimeConverter._convert_1d(values, unit, axis) + return values + + @staticmethod + def _convert_1d(values, unit, axis): + def try_parse(values): + try: + return mdates.date2num(tools.to_datetime(values)) + except Exception: + return values + + if isinstance(values, (datetime, pydt.date, np.datetime64, pydt.time)): + return mdates.date2num(values) + elif is_integer(values) or is_float(values): + return values + elif isinstance(values, str): + return try_parse(values) + elif isinstance(values, (list, tuple, np.ndarray, Index, Series)): + if isinstance(values, Series): + # https://github.com/matplotlib/matplotlib/issues/11391 + # Series was skipped. Convert to DatetimeIndex to get asi8 + values = Index(values) + if isinstance(values, Index): + values = values.values + if not isinstance(values, np.ndarray): + values = com.asarray_tuplesafe(values) + + if is_integer_dtype(values) or is_float_dtype(values): + return values + + try: + values = tools.to_datetime(values) + except Exception: + pass + + values = mdates.date2num(values) + + return values + + @staticmethod + def axisinfo(unit: tzinfo | None, axis) -> munits.AxisInfo: + """ + Return the :class:`~matplotlib.units.AxisInfo` for *unit*. + + *unit* is a tzinfo instance or None. + The *axis* argument is required but not used. + """ + tz = unit + + majloc = PandasAutoDateLocator(tz=tz) + majfmt = PandasAutoDateFormatter(majloc, tz=tz) + datemin = pydt.date(2000, 1, 1) + datemax = pydt.date(2010, 1, 1) + + return munits.AxisInfo( + majloc=majloc, majfmt=majfmt, label="", default_limits=(datemin, datemax) + ) + + +class PandasAutoDateFormatter(mdates.AutoDateFormatter): + def __init__(self, locator, tz=None, defaultfmt: str = "%Y-%m-%d") -> None: + mdates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt) + + +class PandasAutoDateLocator(mdates.AutoDateLocator): + def get_locator(self, dmin, dmax): + """Pick the best locator based on a distance.""" + tot_sec = (dmax - dmin).total_seconds() + + if abs(tot_sec) < self.minticks: + self._freq = -1 + locator = MilliSecondLocator(self.tz) + locator.set_axis(self.axis) + + # error: Item "None" of "Axis | _DummyAxis | _AxisWrapper | None" + # has no attribute "get_data_interval" + locator.axis.set_view_interval( # type: ignore[union-attr] + *self.axis.get_view_interval() # type: ignore[union-attr] + ) + locator.axis.set_data_interval( # type: ignore[union-attr] + *self.axis.get_data_interval() # type: ignore[union-attr] + ) + return locator + + return mdates.AutoDateLocator.get_locator(self, dmin, dmax) + + def _get_unit(self): + return MilliSecondLocator.get_unit_generic(self._freq) + + +class MilliSecondLocator(mdates.DateLocator): + UNIT = 1.0 / (24 * 3600 * 1000) + + def __init__(self, tz) -> None: + mdates.DateLocator.__init__(self, tz) + self._interval = 1.0 + + def _get_unit(self): + return self.get_unit_generic(-1) + + @staticmethod + def get_unit_generic(freq): + unit = mdates.RRuleLocator.get_unit_generic(freq) + if unit < 0: + return MilliSecondLocator.UNIT + return unit + + def __call__(self): + # if no data have been set, this will tank with a ValueError + try: + dmin, dmax = self.viewlim_to_dt() + except ValueError: + return [] + + # We need to cap at the endpoints of valid datetime + nmax, nmin = mdates.date2num((dmax, dmin)) + + num = (nmax - nmin) * 86400 * 1000 + max_millis_ticks = 6 + for interval in [1, 10, 50, 100, 200, 500]: + if num <= interval 
* (max_millis_ticks - 1): + self._interval = interval + break + # We went through the whole loop without breaking, default to 1 + self._interval = 1000.0 + + estimate = (nmax - nmin) / (self._get_unit() * self._get_interval()) + + if estimate > self.MAXTICKS * 2: + raise RuntimeError( + "MillisecondLocator estimated to generate " + f"{estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS" + f"* 2 ({self.MAXTICKS * 2:d}) " + ) + + interval = self._get_interval() + freq = f"{interval}ms" + tz = self.tz.tzname(None) + st = dmin.replace(tzinfo=None) + ed = dmax.replace(tzinfo=None) + all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object) + + try: + if len(all_dates) > 0: + locs = self.raise_if_exceeds(mdates.date2num(all_dates)) + return locs + except Exception: # pragma: no cover + pass + + lims = mdates.date2num([dmin, dmax]) + return lims + + def _get_interval(self): + return self._interval + + def autoscale(self): + """ + Set the view limits to include the data range. + """ + # We need to cap at the endpoints of valid datetime + dmin, dmax = self.datalim_to_dt() + + vmin = mdates.date2num(dmin) + vmax = mdates.date2num(dmax) + + return self.nonsingular(vmin, vmax) + + +def _from_ordinal(x, tz: tzinfo | None = None) -> datetime: + ix = int(x) + dt = datetime.fromordinal(ix) + remainder = float(x) - ix + hour, remainder = divmod(24 * remainder, 1) + minute, remainder = divmod(60 * remainder, 1) + second, remainder = divmod(60 * remainder, 1) + microsecond = int(1_000_000 * remainder) + if microsecond < 10: + microsecond = 0 # compensate for rounding errors + dt = datetime( + dt.year, dt.month, dt.day, int(hour), int(minute), int(second), microsecond + ) + if tz is not None: + dt = dt.astimezone(tz) + + if microsecond > 999990: # compensate for rounding errors + dt += timedelta(microseconds=1_000_000 - microsecond) + + return dt + + +# Fixed frequency dynamic tick locators and formatters + +# ------------------------------------------------------------------------- +# --- Locators --- +# ------------------------------------------------------------------------- + + +def _get_default_annual_spacing(nyears) -> tuple[int, int]: + """ + Returns a default spacing between consecutive ticks for annual data. + """ + if nyears < 11: + (min_spacing, maj_spacing) = (1, 1) + elif nyears < 20: + (min_spacing, maj_spacing) = (1, 2) + elif nyears < 50: + (min_spacing, maj_spacing) = (1, 5) + elif nyears < 100: + (min_spacing, maj_spacing) = (5, 10) + elif nyears < 200: + (min_spacing, maj_spacing) = (5, 25) + elif nyears < 600: + (min_spacing, maj_spacing) = (10, 50) + else: + factor = nyears // 1000 + 1 + (min_spacing, maj_spacing) = (factor * 20, factor * 100) + return (min_spacing, maj_spacing) + + +def _period_break(dates: PeriodIndex, period: str) -> npt.NDArray[np.intp]: + """ + Returns the indices where the given period changes. + + Parameters + ---------- + dates : PeriodIndex + Array of intervals to monitor. + period : str + Name of the period to monitor. + """ + mask = _period_break_mask(dates, period) + return np.nonzero(mask)[0] + + +def _period_break_mask(dates: PeriodIndex, period: str) -> npt.NDArray[np.bool_]: + current = getattr(dates, period) + previous = getattr(dates - 1 * dates.freq, period) + return current != previous + + +def has_level_label(label_flags: npt.NDArray[np.intp], vmin: float) -> bool: + """ + Returns true if the ``label_flags`` indicate there is at least one label + for this level. 
+ + if the minimum view limit is not an exact integer, then the first tick + label won't be shown, so we must adjust for that. + """ + if label_flags.size == 0 or ( + label_flags.size == 1 and label_flags[0] == 0 and vmin % 1 > 0.0 + ): + return False + else: + return True + + +def _get_periods_per_ymd(freq: BaseOffset) -> tuple[int, int, int]: + # error: "BaseOffset" has no attribute "_period_dtype_code" + dtype_code = freq._period_dtype_code # type: ignore[attr-defined] + freq_group = FreqGroup.from_period_dtype_code(dtype_code) + + ppd = -1 # placeholder for above-day freqs + + if dtype_code >= FreqGroup.FR_HR.value: + # error: "BaseOffset" has no attribute "_creso" + ppd = periods_per_day(freq._creso) # type: ignore[attr-defined] + ppm = 28 * ppd + ppy = 365 * ppd + elif freq_group == FreqGroup.FR_BUS: + ppm = 19 + ppy = 261 + elif freq_group == FreqGroup.FR_DAY: + ppm = 28 + ppy = 365 + elif freq_group == FreqGroup.FR_WK: + ppm = 3 + ppy = 52 + elif freq_group == FreqGroup.FR_MTH: + ppm = 1 + ppy = 12 + elif freq_group == FreqGroup.FR_QTR: + ppm = -1 # placerholder + ppy = 4 + elif freq_group == FreqGroup.FR_ANN: + ppm = -1 # placeholder + ppy = 1 + else: + raise NotImplementedError(f"Unsupported frequency: {dtype_code}") + + return ppd, ppm, ppy + + +def _daily_finder(vmin, vmax, freq: BaseOffset) -> np.ndarray: + # error: "BaseOffset" has no attribute "_period_dtype_code" + dtype_code = freq._period_dtype_code # type: ignore[attr-defined] + + periodsperday, periodspermonth, periodsperyear = _get_periods_per_ymd(freq) + + # save this for later usage + vmin_orig = vmin + (vmin, vmax) = (int(vmin), int(vmax)) + span = vmax - vmin + 1 + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "Period with BDay freq is deprecated", category=FutureWarning + ) + warnings.filterwarnings( + "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning + ) + dates_ = period_range( + start=Period(ordinal=vmin, freq=freq), + end=Period(ordinal=vmax, freq=freq), + freq=freq, + ) + + # Initialize the output + info = np.zeros( + span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")] + ) + info["val"][:] = dates_.asi8 + info["fmt"][:] = "" + info["maj"][[0, -1]] = True + # .. and set some shortcuts + info_maj = info["maj"] + info_min = info["min"] + info_fmt = info["fmt"] + + def first_label(label_flags): + if (label_flags[0] == 0) and (label_flags.size > 1) and ((vmin_orig % 1) > 0.0): + return label_flags[1] + else: + return label_flags[0] + + # Case 1. 
Less than a month + if span <= periodspermonth: + day_start = _period_break(dates_, "day") + month_start = _period_break(dates_, "month") + year_start = _period_break(dates_, "year") + + def _hour_finder(label_interval: int, force_year_start: bool) -> None: + target = dates_.hour + mask = _period_break_mask(dates_, "hour") + info_maj[day_start] = True + info_min[mask & (target % label_interval == 0)] = True + info_fmt[mask & (target % label_interval == 0)] = "%H:%M" + info_fmt[day_start] = "%H:%M\n%d-%b" + info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" + if force_year_start and not has_level_label(year_start, vmin_orig): + info_fmt[first_label(day_start)] = "%H:%M\n%d-%b\n%Y" + + def _minute_finder(label_interval: int) -> None: + target = dates_.minute + hour_start = _period_break(dates_, "hour") + mask = _period_break_mask(dates_, "minute") + info_maj[hour_start] = True + info_min[mask & (target % label_interval == 0)] = True + info_fmt[mask & (target % label_interval == 0)] = "%H:%M" + info_fmt[day_start] = "%H:%M\n%d-%b" + info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" + + def _second_finder(label_interval: int) -> None: + target = dates_.second + minute_start = _period_break(dates_, "minute") + mask = _period_break_mask(dates_, "second") + info_maj[minute_start] = True + info_min[mask & (target % label_interval == 0)] = True + info_fmt[mask & (target % label_interval == 0)] = "%H:%M:%S" + info_fmt[day_start] = "%H:%M:%S\n%d-%b" + info_fmt[year_start] = "%H:%M:%S\n%d-%b\n%Y" + + if span < periodsperday / 12000: + _second_finder(1) + elif span < periodsperday / 6000: + _second_finder(2) + elif span < periodsperday / 2400: + _second_finder(5) + elif span < periodsperday / 1200: + _second_finder(10) + elif span < periodsperday / 800: + _second_finder(15) + elif span < periodsperday / 400: + _second_finder(30) + elif span < periodsperday / 150: + _minute_finder(1) + elif span < periodsperday / 70: + _minute_finder(2) + elif span < periodsperday / 24: + _minute_finder(5) + elif span < periodsperday / 12: + _minute_finder(15) + elif span < periodsperday / 6: + _minute_finder(30) + elif span < periodsperday / 2.5: + _hour_finder(1, False) + elif span < periodsperday / 1.5: + _hour_finder(2, False) + elif span < periodsperday * 1.25: + _hour_finder(3, False) + elif span < periodsperday * 2.5: + _hour_finder(6, True) + elif span < periodsperday * 4: + _hour_finder(12, True) + else: + info_maj[month_start] = True + info_min[day_start] = True + info_fmt[day_start] = "%d" + info_fmt[month_start] = "%d\n%b" + info_fmt[year_start] = "%d\n%b\n%Y" + if not has_level_label(year_start, vmin_orig): + if not has_level_label(month_start, vmin_orig): + info_fmt[first_label(day_start)] = "%d\n%b\n%Y" + else: + info_fmt[first_label(month_start)] = "%d\n%b\n%Y" + + # Case 2. Less than three months + elif span <= periodsperyear // 4: + month_start = _period_break(dates_, "month") + info_maj[month_start] = True + if dtype_code < FreqGroup.FR_HR.value: + info["min"] = True + else: + day_start = _period_break(dates_, "day") + info["min"][day_start] = True + week_start = _period_break(dates_, "week") + year_start = _period_break(dates_, "year") + info_fmt[week_start] = "%d" + info_fmt[month_start] = "\n\n%b" + info_fmt[year_start] = "\n\n%b\n%Y" + if not has_level_label(year_start, vmin_orig): + if not has_level_label(month_start, vmin_orig): + info_fmt[first_label(week_start)] = "\n\n%b\n%Y" + else: + info_fmt[first_label(month_start)] = "\n\n%b\n%Y" + # Case 3. Less than 14 months ............... 
+ elif span <= 1.15 * periodsperyear: + year_start = _period_break(dates_, "year") + month_start = _period_break(dates_, "month") + week_start = _period_break(dates_, "week") + info_maj[month_start] = True + info_min[week_start] = True + info_min[year_start] = False + info_min[month_start] = False + info_fmt[month_start] = "%b" + info_fmt[year_start] = "%b\n%Y" + if not has_level_label(year_start, vmin_orig): + info_fmt[first_label(month_start)] = "%b\n%Y" + # Case 4. Less than 2.5 years ............... + elif span <= 2.5 * periodsperyear: + year_start = _period_break(dates_, "year") + quarter_start = _period_break(dates_, "quarter") + month_start = _period_break(dates_, "month") + info_maj[quarter_start] = True + info_min[month_start] = True + info_fmt[quarter_start] = "%b" + info_fmt[year_start] = "%b\n%Y" + # Case 4. Less than 4 years ................. + elif span <= 4 * periodsperyear: + year_start = _period_break(dates_, "year") + month_start = _period_break(dates_, "month") + info_maj[year_start] = True + info_min[month_start] = True + info_min[year_start] = False + + month_break = dates_[month_start].month + jan_or_jul = month_start[(month_break == 1) | (month_break == 7)] + info_fmt[jan_or_jul] = "%b" + info_fmt[year_start] = "%b\n%Y" + # Case 5. Less than 11 years ................ + elif span <= 11 * periodsperyear: + year_start = _period_break(dates_, "year") + quarter_start = _period_break(dates_, "quarter") + info_maj[year_start] = True + info_min[quarter_start] = True + info_min[year_start] = False + info_fmt[year_start] = "%Y" + # Case 6. More than 12 years ................ + else: + year_start = _period_break(dates_, "year") + year_break = dates_[year_start].year + nyears = span / periodsperyear + (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) + major_idx = year_start[(year_break % maj_anndef == 0)] + info_maj[major_idx] = True + minor_idx = year_start[(year_break % min_anndef == 0)] + info_min[minor_idx] = True + info_fmt[major_idx] = "%Y" + + return info + + +def _monthly_finder(vmin, vmax, freq: BaseOffset) -> np.ndarray: + _, _, periodsperyear = _get_periods_per_ymd(freq) + + vmin_orig = vmin + (vmin, vmax) = (int(vmin), int(vmax)) + span = vmax - vmin + 1 + + # Initialize the output + info = np.zeros( + span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")] + ) + info["val"] = np.arange(vmin, vmax + 1) + dates_ = info["val"] + info["fmt"] = "" + year_start = (dates_ % 12 == 0).nonzero()[0] + info_maj = info["maj"] + info_fmt = info["fmt"] + + if span <= 1.15 * periodsperyear: + info_maj[year_start] = True + info["min"] = True + + info_fmt[:] = "%b" + info_fmt[year_start] = "%b\n%Y" + + if not has_level_label(year_start, vmin_orig): + if dates_.size > 1: + idx = 1 + else: + idx = 0 + info_fmt[idx] = "%b\n%Y" + + elif span <= 2.5 * periodsperyear: + quarter_start = (dates_ % 3 == 0).nonzero() + info_maj[year_start] = True + # TODO: Check the following : is it really info['fmt'] ? 
+ # 2023-09-15 this is reached in test_finder_monthly + info["fmt"][quarter_start] = True + info["min"] = True + + info_fmt[quarter_start] = "%b" + info_fmt[year_start] = "%b\n%Y" + + elif span <= 4 * periodsperyear: + info_maj[year_start] = True + info["min"] = True + + jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6) + info_fmt[jan_or_jul] = "%b" + info_fmt[year_start] = "%b\n%Y" + + elif span <= 11 * periodsperyear: + quarter_start = (dates_ % 3 == 0).nonzero() + info_maj[year_start] = True + info["min"][quarter_start] = True + + info_fmt[year_start] = "%Y" + + else: + nyears = span / periodsperyear + (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) + years = dates_[year_start] // 12 + 1 + major_idx = year_start[(years % maj_anndef == 0)] + info_maj[major_idx] = True + info["min"][year_start[(years % min_anndef == 0)]] = True + + info_fmt[major_idx] = "%Y" + + return info + + +def _quarterly_finder(vmin, vmax, freq: BaseOffset) -> np.ndarray: + _, _, periodsperyear = _get_periods_per_ymd(freq) + vmin_orig = vmin + (vmin, vmax) = (int(vmin), int(vmax)) + span = vmax - vmin + 1 + + info = np.zeros( + span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")] + ) + info["val"] = np.arange(vmin, vmax + 1) + info["fmt"] = "" + dates_ = info["val"] + info_maj = info["maj"] + info_fmt = info["fmt"] + year_start = (dates_ % 4 == 0).nonzero()[0] + + if span <= 3.5 * periodsperyear: + info_maj[year_start] = True + info["min"] = True + + info_fmt[:] = "Q%q" + info_fmt[year_start] = "Q%q\n%F" + if not has_level_label(year_start, vmin_orig): + if dates_.size > 1: + idx = 1 + else: + idx = 0 + info_fmt[idx] = "Q%q\n%F" + + elif span <= 11 * periodsperyear: + info_maj[year_start] = True + info["min"] = True + info_fmt[year_start] = "%F" + + else: + # https://github.com/pandas-dev/pandas/pull/47602 + years = dates_[year_start] // 4 + 1970 + nyears = span / periodsperyear + (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) + major_idx = year_start[(years % maj_anndef == 0)] + info_maj[major_idx] = True + info["min"][year_start[(years % min_anndef == 0)]] = True + info_fmt[major_idx] = "%F" + + return info + + +def _annual_finder(vmin, vmax, freq: BaseOffset) -> np.ndarray: + # Note: small difference here vs other finders in adding 1 to vmax + (vmin, vmax) = (int(vmin), int(vmax + 1)) + span = vmax - vmin + 1 + + info = np.zeros( + span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")] + ) + info["val"] = np.arange(vmin, vmax + 1) + info["fmt"] = "" + dates_ = info["val"] + + (min_anndef, maj_anndef) = _get_default_annual_spacing(span) + major_idx = dates_ % maj_anndef == 0 + minor_idx = dates_ % min_anndef == 0 + info["maj"][major_idx] = True + info["min"][minor_idx] = True + info["fmt"][major_idx] = "%Y" + + return info + + +def get_finder(freq: BaseOffset): + # error: "BaseOffset" has no attribute "_period_dtype_code" + dtype_code = freq._period_dtype_code # type: ignore[attr-defined] + fgroup = FreqGroup.from_period_dtype_code(dtype_code) + + if fgroup == FreqGroup.FR_ANN: + return _annual_finder + elif fgroup == FreqGroup.FR_QTR: + return _quarterly_finder + elif fgroup == FreqGroup.FR_MTH: + return _monthly_finder + elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK: + return _daily_finder + else: # pragma: no cover + raise NotImplementedError(f"Unsupported frequency: {dtype_code}") + + +class TimeSeries_DateLocator(Locator): + """ + Locates the ticks along an axis controlled by a :class:`Series`. 
+ + Parameters + ---------- + freq : BaseOffset + Valid frequency specifier. + minor_locator : {False, True}, optional + Whether the locator is for minor ticks (True) or not. + dynamic_mode : {True, False}, optional + Whether the locator should work in dynamic mode. + base : {int}, optional + quarter : {int}, optional + month : {int}, optional + day : {int}, optional + """ + + axis: Axis + + def __init__( + self, + freq: BaseOffset, + minor_locator: bool = False, + dynamic_mode: bool = True, + base: int = 1, + quarter: int = 1, + month: int = 1, + day: int = 1, + plot_obj=None, + ) -> None: + freq = to_offset(freq, is_period=True) + self.freq = freq + self.base = base + (self.quarter, self.month, self.day) = (quarter, month, day) + self.isminor = minor_locator + self.isdynamic = dynamic_mode + self.offset = 0 + self.plot_obj = plot_obj + self.finder = get_finder(freq) + + def _get_default_locs(self, vmin, vmax): + """Returns the default locations of ticks.""" + locator = self.finder(vmin, vmax, self.freq) + + if self.isminor: + return np.compress(locator["min"], locator["val"]) + return np.compress(locator["maj"], locator["val"]) + + def __call__(self): + """Return the locations of the ticks.""" + # axis calls Locator.set_axis inside set_m_formatter + + vi = tuple(self.axis.get_view_interval()) + vmin, vmax = vi + if vmax < vmin: + vmin, vmax = vmax, vmin + if self.isdynamic: + locs = self._get_default_locs(vmin, vmax) + else: # pragma: no cover + base = self.base + (d, m) = divmod(vmin, base) + vmin = (d + 1) * base + # error: No overload variant of "range" matches argument types "float", + # "float", "int" + locs = list(range(vmin, vmax + 1, base)) # type: ignore[call-overload] + return locs + + def autoscale(self): + """ + Sets the view limits to the nearest multiples of base that contain the + data. + """ + # requires matplotlib >= 0.98.0 + (vmin, vmax) = self.axis.get_data_interval() + + locs = self._get_default_locs(vmin, vmax) + (vmin, vmax) = locs[[0, -1]] + if vmin == vmax: + vmin -= 1 + vmax += 1 + return nonsingular(vmin, vmax) + + +# ------------------------------------------------------------------------- +# --- Formatter --- +# ------------------------------------------------------------------------- + + +class TimeSeries_DateFormatter(Formatter): + """ + Formats the ticks along an axis controlled by a :class:`PeriodIndex`. + + Parameters + ---------- + freq : BaseOffset + Valid frequency specifier. + minor_locator : bool, default False + Whether the current formatter should apply to minor ticks (True) or + major ticks (False). + dynamic_mode : bool, default True + Whether the formatter works in dynamic mode or not. 
+ """ + + axis: Axis + + def __init__( + self, + freq: BaseOffset, + minor_locator: bool = False, + dynamic_mode: bool = True, + plot_obj=None, + ) -> None: + freq = to_offset(freq, is_period=True) + self.format = None + self.freq = freq + self.locs: list[Any] = [] # unused, for matplotlib compat + self.formatdict: dict[Any, Any] | None = None + self.isminor = minor_locator + self.isdynamic = dynamic_mode + self.offset = 0 + self.plot_obj = plot_obj + self.finder = get_finder(freq) + + def _set_default_format(self, vmin, vmax): + """Returns the default ticks spacing.""" + info = self.finder(vmin, vmax, self.freq) + + if self.isminor: + format = np.compress(info["min"] & np.logical_not(info["maj"]), info) + else: + format = np.compress(info["maj"], info) + self.formatdict = {x: f for (x, _, _, f) in format} + return self.formatdict + + def set_locs(self, locs) -> None: + """Sets the locations of the ticks""" + # don't actually use the locs. This is just needed to work with + # matplotlib. Force to use vmin, vmax + + self.locs = locs + + (vmin, vmax) = tuple(self.axis.get_view_interval()) + if vmax < vmin: + (vmin, vmax) = (vmax, vmin) + self._set_default_format(vmin, vmax) + + def __call__(self, x, pos: int | None = 0) -> str: + if self.formatdict is None: + return "" + else: + fmt = self.formatdict.pop(x, "") + if isinstance(fmt, np.bytes_): + fmt = fmt.decode("utf-8") + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Period with BDay freq is deprecated", + category=FutureWarning, + ) + period = Period(ordinal=int(x), freq=self.freq) + assert isinstance(period, Period) + return period.strftime(fmt) + + +class TimeSeries_TimedeltaFormatter(Formatter): + """ + Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`. 
+ """ + + axis: Axis + + @staticmethod + def format_timedelta_ticks(x, pos, n_decimals: int) -> str: + """ + Convert seconds to 'D days HH:MM:SS.F' + """ + s, ns = divmod(x, 10**9) # TODO(non-nano): this looks like it assumes ns + m, s = divmod(s, 60) + h, m = divmod(m, 60) + d, h = divmod(h, 24) + decimals = int(ns * 10 ** (n_decimals - 9)) + s = f"{int(h):02d}:{int(m):02d}:{int(s):02d}" + if n_decimals > 0: + s += f".{decimals:0{n_decimals}d}" + if d != 0: + s = f"{int(d):d} days {s}" + return s + + def __call__(self, x, pos: int | None = 0) -> str: + (vmin, vmax) = tuple(self.axis.get_view_interval()) + n_decimals = min(int(np.ceil(np.log10(100 * 10**9 / abs(vmax - vmin)))), 9) + return self.format_timedelta_ticks(x, pos, n_decimals) diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/core.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/core.py new file mode 100644 index 0000000000000000000000000000000000000000..2979903edf360de745429d2c7d7a02240221ff7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/core.py @@ -0,0 +1,2119 @@ +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from collections.abc import ( + Hashable, + Iterable, + Iterator, + Sequence, +) +from typing import ( + TYPE_CHECKING, + Any, + Literal, + cast, + final, +) +import warnings + +import matplotlib as mpl +import numpy as np + +from pandas._libs import lib +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_any_real_numeric_dtype, + is_bool, + is_float, + is_float_dtype, + is_hashable, + is_integer, + is_integer_dtype, + is_iterator, + is_list_like, + is_number, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCDatetimeIndex, + ABCIndex, + ABCMultiIndex, + ABCPeriodIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import isna + +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.util.version import Version + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib import tools +from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters +from pandas.plotting._matplotlib.groupby import reconstruct_data_with_by +from pandas.plotting._matplotlib.misc import unpack_single_str_list +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.timeseries import ( + decorate_axes, + format_dateaxis, + maybe_convert_index, + maybe_resample, + use_dynamic_x, +) +from pandas.plotting._matplotlib.tools import ( + create_subplots, + flatten_axes, + format_date_labels, + get_all_lines, + get_xlim, + handle_shared_axes, +) + +if TYPE_CHECKING: + from matplotlib.artist import Artist + from matplotlib.axes import Axes + from matplotlib.axis import Axis + from matplotlib.figure import Figure + + from pandas._typing import ( + IndexLabel, + NDFrameT, + PlottingOrientation, + npt, + ) + + from pandas import Series + + +def _color_in_style(style: str) -> bool: + """ + Check if there is a color letter in the style string. 
+ """ + from matplotlib.colors import BASE_COLORS + + return not set(BASE_COLORS).isdisjoint(style) + + +class MPLPlot(ABC): + """ + Base class for assembling a pandas plot using matplotlib + + Parameters + ---------- + data : + + """ + + @property + @abstractmethod + def _kind(self) -> str: + """Specify kind str. Must be overridden in child class""" + raise NotImplementedError + + _layout_type = "vertical" + _default_rot = 0 + + @property + def orientation(self) -> str | None: + return None + + data: DataFrame + + def __init__( + self, + data, + kind=None, + by: IndexLabel | None = None, + subplots: bool | Sequence[Sequence[str]] = False, + sharex: bool | None = None, + sharey: bool = False, + use_index: bool = True, + figsize: tuple[float, float] | None = None, + grid=None, + legend: bool | str = True, + rot=None, + ax=None, + fig=None, + title=None, + xlim=None, + ylim=None, + xticks=None, + yticks=None, + xlabel: Hashable | None = None, + ylabel: Hashable | None = None, + fontsize: int | None = None, + secondary_y: bool | tuple | list | np.ndarray = False, + colormap=None, + table: bool = False, + layout=None, + include_bool: bool = False, + column: IndexLabel | None = None, + *, + logx: bool | None | Literal["sym"] = False, + logy: bool | None | Literal["sym"] = False, + loglog: bool | None | Literal["sym"] = False, + mark_right: bool = True, + stacked: bool = False, + label: Hashable | None = None, + style=None, + **kwds, + ) -> None: + import matplotlib.pyplot as plt + + # if users assign an empty list or tuple, raise `ValueError` + # similar to current `df.box` and `df.hist` APIs. + if by in ([], ()): + raise ValueError("No group keys passed!") + self.by = com.maybe_make_list(by) + + # Assign the rest of columns into self.columns if by is explicitly defined + # while column is not, only need `columns` in hist/box plot when it's DF + # TODO: Might deprecate `column` argument in future PR (#28373) + if isinstance(data, DataFrame): + if column: + self.columns = com.maybe_make_list(column) + elif self.by is None: + self.columns = [ + col for col in data.columns if is_numeric_dtype(data[col]) + ] + else: + self.columns = [ + col + for col in data.columns + if col not in self.by and is_numeric_dtype(data[col]) + ] + + # For `hist` plot, need to get grouped original data before `self.data` is + # updated later + if self.by is not None and self._kind == "hist": + self._grouped = data.groupby(unpack_single_str_list(self.by)) + + self.kind = kind + + self.subplots = type(self)._validate_subplots_kwarg( + subplots, data, kind=self._kind + ) + + self.sharex = type(self)._validate_sharex(sharex, ax, by) + self.sharey = sharey + self.figsize = figsize + self.layout = layout + + self.xticks = xticks + self.yticks = yticks + self.xlim = xlim + self.ylim = ylim + self.title = title + self.use_index = use_index + self.xlabel = xlabel + self.ylabel = ylabel + + self.fontsize = fontsize + + if rot is not None: + self.rot = rot + # need to know for format_date_labels since it's rotated to 30 by + # default + self._rot_set = True + else: + self._rot_set = False + self.rot = self._default_rot + + if grid is None: + grid = False if secondary_y else plt.rcParams["axes.grid"] + + self.grid = grid + self.legend = legend + self.legend_handles: list[Artist] = [] + self.legend_labels: list[Hashable] = [] + + self.logx = type(self)._validate_log_kwd("logx", logx) + self.logy = type(self)._validate_log_kwd("logy", logy) + self.loglog = type(self)._validate_log_kwd("loglog", loglog) + self.label = label + 
self.style = style + self.mark_right = mark_right + self.stacked = stacked + + # ax may be an Axes object or (if self.subplots) an ndarray of + # Axes objects + self.ax = ax + # TODO: deprecate fig keyword as it is ignored, not passed in tests + # as of 2023-11-05 + + # parse errorbar input if given + xerr = kwds.pop("xerr", None) + yerr = kwds.pop("yerr", None) + nseries = self._get_nseries(data) + xerr, data = type(self)._parse_errorbars("xerr", xerr, data, nseries) + yerr, data = type(self)._parse_errorbars("yerr", yerr, data, nseries) + self.errors = {"xerr": xerr, "yerr": yerr} + self.data = data + + if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndex)): + secondary_y = [secondary_y] + self.secondary_y = secondary_y + + # ugly TypeError if user passes matplotlib's `cmap` name. + # Probably better to accept either. + if "cmap" in kwds and colormap: + raise TypeError("Only specify one of `cmap` and `colormap`.") + if "cmap" in kwds: + self.colormap = kwds.pop("cmap") + else: + self.colormap = colormap + + self.table = table + self.include_bool = include_bool + + self.kwds = kwds + + color = kwds.pop("color", lib.no_default) + self.color = self._validate_color_args(color, self.colormap) + assert "color" not in self.kwds + + self.data = self._ensure_frame(self.data) + + @final + @staticmethod + def _validate_sharex(sharex: bool | None, ax, by) -> bool: + if sharex is None: + # if by is defined, subplots are used and sharex should be False + if ax is None and by is None: # pylint: disable=simplifiable-if-statement + sharex = True + else: + # if we get an axis, the users should do the visibility + # setting... + sharex = False + elif not is_bool(sharex): + raise TypeError("sharex must be a bool or None") + return bool(sharex) + + @classmethod + def _validate_log_kwd( + cls, + kwd: str, + value: bool | None | Literal["sym"], + ) -> bool | None | Literal["sym"]: + if ( + value is None + or isinstance(value, bool) + or (isinstance(value, str) and value == "sym") + ): + return value + raise ValueError( + f"keyword '{kwd}' should be bool, None, or 'sym', not '{value}'" + ) + + @final + @staticmethod + def _validate_subplots_kwarg( + subplots: bool | Sequence[Sequence[str]], data: Series | DataFrame, kind: str + ) -> bool | list[tuple[int, ...]]: + """ + Validate the subplots parameter + + - check type and content + - check for duplicate columns + - check for invalid column names + - convert column names into indices + - add missing columns in a group of their own + See comments in code below for more details. + + Parameters + ---------- + subplots : subplots parameters as passed to PlotAccessor + + Returns + ------- + validated subplots : a bool or a list of tuples of column indices. Columns + in the same tuple will be grouped together in the resulting plot. + """ + + if isinstance(subplots, bool): + return subplots + elif not isinstance(subplots, Iterable): + raise ValueError("subplots should be a bool or an iterable") + + supported_kinds = ( + "line", + "bar", + "barh", + "hist", + "kde", + "density", + "area", + "pie", + ) + if kind not in supported_kinds: + raise ValueError( + "When subplots is an iterable, kind must be " + f"one of {', '.join(supported_kinds)}. Got {kind}." + ) + + if isinstance(data, ABCSeries): + raise NotImplementedError( + "An iterable subplots for a Series is not supported." 
+ ) + + columns = data.columns + if isinstance(columns, ABCMultiIndex): + raise NotImplementedError( + "An iterable subplots for a DataFrame with a MultiIndex column " + "is not supported." + ) + + if columns.nunique() != len(columns): + raise NotImplementedError( + "An iterable subplots for a DataFrame with non-unique column " + "labels is not supported." + ) + + # subplots is a list of tuples where each tuple is a group of + # columns to be grouped together (one ax per group). + # we consolidate the subplots list such that: + # - the tuples contain indices instead of column names + # - the columns that aren't yet in the list are added in a group + # of their own. + # For example with columns from a to g, and + # subplots = [(a, c), (b, f, e)], + # we end up with [(ai, ci), (bi, fi, ei), (di,), (gi,)] + # This way, we can handle self.subplots in a homogeneous manner + # later. + # TODO: also accept indices instead of just names? + + out = [] + seen_columns: set[Hashable] = set() + for group in subplots: + if not is_list_like(group): + raise ValueError( + "When subplots is an iterable, each entry " + "should be a list/tuple of column names." + ) + idx_locs = columns.get_indexer_for(group) + if (idx_locs == -1).any(): + bad_labels = np.extract(idx_locs == -1, group) + raise ValueError( + f"Column label(s) {list(bad_labels)} not found in the DataFrame." + ) + unique_columns = set(group) + duplicates = seen_columns.intersection(unique_columns) + if duplicates: + raise ValueError( + "Each column should be in only one subplot. " + f"Columns {duplicates} were found in multiple subplots." + ) + seen_columns = seen_columns.union(unique_columns) + out.append(tuple(idx_locs)) + + unseen_columns = columns.difference(seen_columns) + for column in unseen_columns: + idx_loc = columns.get_loc(column) + out.append((idx_loc,)) + return out + + def _validate_color_args(self, color, colormap): + if color is lib.no_default: + # It was not provided by the user + if "colors" in self.kwds and colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used simultaneously. " + "Using 'color'", + stacklevel=find_stack_level(), + ) + return None + if self.nseries == 1 and color is not None and not is_list_like(color): + # support series.plot(color='green') + color = [color] + + if isinstance(color, tuple) and self.nseries == 1 and len(color) in (3, 4): + # support RGB and RGBA tuples in series plot + color = [color] + + if colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used simultaneously. Using 'color'", + stacklevel=find_stack_level(), + ) + + if self.style is not None: + if is_list_like(self.style): + styles = self.style + else: + styles = [self.style] + # need only a single match + for s in styles: + if _color_in_style(s): + raise ValueError( + "Cannot pass 'style' string with a color symbol and " + "'color' keyword argument. Please use one or the " + "other or pass 'style' without a color symbol" + ) + return color + + @final + @staticmethod + def _iter_data( + data: DataFrame | dict[Hashable, Series | DataFrame] + ) -> Iterator[tuple[Hashable, np.ndarray]]: + for col, values in data.items(): + # This was originally written to use values.values before EAs + # were implemented; adding np.asarray(...) to keep consistent + # typing. 
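+ # yields (column label, 1-D ndarray of that column's values)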
+ yield col, np.asarray(values.values) + + def _get_nseries(self, data: Series | DataFrame) -> int: + # When `by` is explicitly assigned, grouped data size will be defined, and + # this will determine number of subplots to have, aka `self.nseries` + if data.ndim == 1: + return 1 + elif self.by is not None and self._kind == "hist": + return len(self._grouped) + elif self.by is not None and self._kind == "box": + return len(self.columns) + else: + return data.shape[1] + + @final + @property + def nseries(self) -> int: + return self._get_nseries(self.data) + + @final + def draw(self) -> None: + self.plt.draw_if_interactive() + + @final + def generate(self) -> None: + self._compute_plot_data() + fig = self.fig + self._make_plot(fig) + self._add_table() + self._make_legend() + self._adorn_subplots(fig) + + for ax in self.axes: + self._post_plot_logic_common(ax) + self._post_plot_logic(ax, self.data) + + @final + @staticmethod + def _has_plotted_object(ax: Axes) -> bool: + """check whether ax has data""" + return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0 + + @final + def _maybe_right_yaxis(self, ax: Axes, axes_num: int) -> Axes: + if not self.on_right(axes_num): + # secondary axes may be passed via ax kw + return self._get_ax_layer(ax) + + if hasattr(ax, "right_ax"): + # if it has right_ax property, ``ax`` must be left axes + return ax.right_ax + elif hasattr(ax, "left_ax"): + # if it has left_ax property, ``ax`` must be right axes + return ax + else: + # otherwise, create twin axes + orig_ax, new_ax = ax, ax.twinx() + # TODO: use Matplotlib public API when available + new_ax._get_lines = orig_ax._get_lines # type: ignore[attr-defined] + # TODO #54485 + new_ax._get_patches_for_fill = ( # type: ignore[attr-defined] + orig_ax._get_patches_for_fill # type: ignore[attr-defined] + ) + # TODO #54485 + orig_ax.right_ax, new_ax.left_ax = ( # type: ignore[attr-defined] + new_ax, + orig_ax, + ) + + if not self._has_plotted_object(orig_ax): # no data on left y + orig_ax.get_yaxis().set_visible(False) + + if self.logy is True or self.loglog is True: + new_ax.set_yscale("log") + elif self.logy == "sym" or self.loglog == "sym": + new_ax.set_yscale("symlog") + return new_ax # type: ignore[return-value] + + @final + @cache_readonly + def fig(self) -> Figure: + return self._axes_and_fig[1] + + @final + @cache_readonly + # TODO: can we annotate this as both a Sequence[Axes] and ndarray[object]? 
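+ # the flattened Axes this plot draws on, created lazily by _axes_and_fig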
+ def axes(self) -> Sequence[Axes]: + return self._axes_and_fig[0] + + @final + @cache_readonly + def _axes_and_fig(self) -> tuple[Sequence[Axes], Figure]: + if self.subplots: + naxes = ( + self.nseries if isinstance(self.subplots, bool) else len(self.subplots) + ) + fig, axes = create_subplots( + naxes=naxes, + sharex=self.sharex, + sharey=self.sharey, + figsize=self.figsize, + ax=self.ax, + layout=self.layout, + layout_type=self._layout_type, + ) + elif self.ax is None: + fig = self.plt.figure(figsize=self.figsize) + axes = fig.add_subplot(111) + else: + fig = self.ax.get_figure() + if self.figsize is not None: + fig.set_size_inches(self.figsize) + axes = self.ax + + axes = flatten_axes(axes) + + if self.logx is True or self.loglog is True: + [a.set_xscale("log") for a in axes] + elif self.logx == "sym" or self.loglog == "sym": + [a.set_xscale("symlog") for a in axes] + + if self.logy is True or self.loglog is True: + [a.set_yscale("log") for a in axes] + elif self.logy == "sym" or self.loglog == "sym": + [a.set_yscale("symlog") for a in axes] + + axes_seq = cast(Sequence["Axes"], axes) + return axes_seq, fig + + @property + def result(self): + """ + Return result axes + """ + if self.subplots: + if self.layout is not None and not is_list_like(self.ax): + # error: "Sequence[Any]" has no attribute "reshape" + return self.axes.reshape(*self.layout) # type: ignore[attr-defined] + else: + return self.axes + else: + sec_true = isinstance(self.secondary_y, bool) and self.secondary_y + # error: Argument 1 to "len" has incompatible type "Union[bool, + # Tuple[Any, ...], List[Any], ndarray[Any, Any]]"; expected "Sized" + all_sec = ( + is_list_like(self.secondary_y) + and len(self.secondary_y) == self.nseries # type: ignore[arg-type] + ) + if sec_true or all_sec: + # if all data is plotted on secondary, return right axes + return self._get_ax_layer(self.axes[0], primary=False) + else: + return self.axes[0] + + @final + @staticmethod + def _convert_to_ndarray(data): + # GH31357: categorical columns are processed separately + if isinstance(data.dtype, CategoricalDtype): + return data + + # GH32073: cast to float if values contain nulled integers + if (is_integer_dtype(data.dtype) or is_float_dtype(data.dtype)) and isinstance( + data.dtype, ExtensionDtype + ): + return data.to_numpy(dtype="float", na_value=np.nan) + + # GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to + # np.ndarray before plot. 
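+ # non-empty values are materialized with np.asarray; empty data passes through unchanged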
+ if len(data) > 0: + return np.asarray(data) + + return data + + @final + def _ensure_frame(self, data) -> DataFrame: + if isinstance(data, ABCSeries): + label = self.label + if label is None and data.name is None: + label = "" + if label is None: + # We'll end up with columns of [0] instead of [None] + data = data.to_frame() + else: + data = data.to_frame(name=label) + elif self._kind in ("hist", "box"): + cols = self.columns if self.by is None else self.columns + self.by + data = data.loc[:, cols] + return data + + @final + def _compute_plot_data(self) -> None: + data = self.data + + # GH15079 reconstruct data if by is defined + if self.by is not None: + self.subplots = True + data = reconstruct_data_with_by(self.data, by=self.by, cols=self.columns) + + # GH16953, infer_objects is needed as fallback, for ``Series`` + # with ``dtype == object`` + data = data.infer_objects(copy=False) + include_type = [np.number, "datetime", "datetimetz", "timedelta"] + + # GH23719, allow plotting boolean + if self.include_bool is True: + include_type.append(np.bool_) + + # GH22799, exclude datetime-like type for boxplot + exclude_type = None + if self._kind == "box": + # TODO: change after solving issue 27881 + include_type = [np.number] + exclude_type = ["timedelta"] + + # GH 18755, include object and category type for scatter plot + if self._kind == "scatter": + include_type.extend(["object", "category", "string"]) + + numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type) + + is_empty = numeric_data.shape[-1] == 0 + # no non-numeric frames or series allowed + if is_empty: + raise TypeError("no numeric data to plot") + + self.data = numeric_data.apply(type(self)._convert_to_ndarray) + + def _make_plot(self, fig: Figure) -> None: + raise AbstractMethodError(self) + + @final + def _add_table(self) -> None: + if self.table is False: + return + elif self.table is True: + data = self.data.transpose() + else: + data = self.table + ax = self._get_ax(0) + tools.table(ax, data) + + @final + def _post_plot_logic_common(self, ax: Axes) -> None: + """Common post process for each axes""" + if self.orientation == "vertical" or self.orientation is None: + type(self)._apply_axis_properties( + ax.xaxis, rot=self.rot, fontsize=self.fontsize + ) + type(self)._apply_axis_properties(ax.yaxis, fontsize=self.fontsize) + + if hasattr(ax, "right_ax"): + type(self)._apply_axis_properties( + ax.right_ax.yaxis, fontsize=self.fontsize + ) + + elif self.orientation == "horizontal": + type(self)._apply_axis_properties( + ax.yaxis, rot=self.rot, fontsize=self.fontsize + ) + type(self)._apply_axis_properties(ax.xaxis, fontsize=self.fontsize) + + if hasattr(ax, "right_ax"): + type(self)._apply_axis_properties( + ax.right_ax.yaxis, fontsize=self.fontsize + ) + else: # pragma no cover + raise ValueError + + @abstractmethod + def _post_plot_logic(self, ax: Axes, data) -> None: + """Post process for each axes. 
Overridden in child classes""" + + @final + def _adorn_subplots(self, fig: Figure) -> None: + """Common post process unrelated to data""" + if len(self.axes) > 0: + all_axes = self._get_subplots(fig) + nrows, ncols = self._get_axes_layout(fig) + handle_shared_axes( + axarr=all_axes, + nplots=len(all_axes), + naxes=nrows * ncols, + nrows=nrows, + ncols=ncols, + sharex=self.sharex, + sharey=self.sharey, + ) + + for ax in self.axes: + ax = getattr(ax, "right_ax", ax) + if self.yticks is not None: + ax.set_yticks(self.yticks) + + if self.xticks is not None: + ax.set_xticks(self.xticks) + + if self.ylim is not None: + ax.set_ylim(self.ylim) + + if self.xlim is not None: + ax.set_xlim(self.xlim) + + # GH9093, currently Pandas does not show ylabel, so if users provide + # ylabel will set it as ylabel in the plot. + if self.ylabel is not None: + ax.set_ylabel(pprint_thing(self.ylabel)) + + ax.grid(self.grid) + + if self.title: + if self.subplots: + if is_list_like(self.title): + if len(self.title) != self.nseries: + raise ValueError( + "The length of `title` must equal the number " + "of columns if using `title` of type `list` " + "and `subplots=True`.\n" + f"length of title = {len(self.title)}\n" + f"number of columns = {self.nseries}" + ) + + for ax, title in zip(self.axes, self.title): + ax.set_title(title) + else: + fig.suptitle(self.title) + else: + if is_list_like(self.title): + msg = ( + "Using `title` of type `list` is not supported " + "unless `subplots=True` is passed" + ) + raise ValueError(msg) + self.axes[0].set_title(self.title) + + @final + @staticmethod + def _apply_axis_properties( + axis: Axis, rot=None, fontsize: int | None = None + ) -> None: + """ + Tick creation within matplotlib is reasonably expensive and is + internally deferred until accessed as Ticks are created/destroyed + multiple times per draw. It's therefore beneficial for us to avoid + accessing unless we will act on the Tick. + """ + if rot is not None or fontsize is not None: + # rot=0 is a valid setting, hence the explicit None check + labels = axis.get_majorticklabels() + axis.get_minorticklabels() + for label in labels: + if rot is not None: + label.set_rotation(rot) + if fontsize is not None: + label.set_fontsize(fontsize) + + @final + @property + def legend_title(self) -> str | None: + if not isinstance(self.data.columns, ABCMultiIndex): + name = self.data.columns.name + if name is not None: + name = pprint_thing(name) + return name + else: + stringified = map(pprint_thing, self.data.columns.names) + return ",".join(stringified) + + @final + def _mark_right_label(self, label: str, index: int) -> str: + """ + Append ``(right)`` to the label of a line if it's plotted on the right axis. + + Note that ``(right)`` is only appended when ``subplots=False``. + """ + if not self.subplots and self.mark_right and self.on_right(index): + label += " (right)" + return label + + @final + def _append_legend_handles_labels(self, handle: Artist, label: str) -> None: + """ + Append current handle and label to ``legend_handles`` and ``legend_labels``. + + These will be used to make the legend. 
+ """ + self.legend_handles.append(handle) + self.legend_labels.append(label) + + def _make_legend(self) -> None: + ax, leg = self._get_ax_legend(self.axes[0]) + + handles = [] + labels = [] + title = "" + + if not self.subplots: + if leg is not None: + title = leg.get_title().get_text() + # Replace leg.legend_handles because it misses marker info + if Version(mpl.__version__) < Version("3.7"): + handles = leg.legendHandles + else: + handles = leg.legend_handles + labels = [x.get_text() for x in leg.get_texts()] + + if self.legend: + if self.legend == "reverse": + handles += reversed(self.legend_handles) + labels += reversed(self.legend_labels) + else: + handles += self.legend_handles + labels += self.legend_labels + + if self.legend_title is not None: + title = self.legend_title + + if len(handles) > 0: + ax.legend(handles, labels, loc="best", title=title) + + elif self.subplots and self.legend: + for ax in self.axes: + if ax.get_visible(): + ax.legend(loc="best") + + @final + @staticmethod + def _get_ax_legend(ax: Axes): + """ + Take in axes and return ax and legend under different scenarios + """ + leg = ax.get_legend() + + other_ax = getattr(ax, "left_ax", None) or getattr(ax, "right_ax", None) + other_leg = None + if other_ax is not None: + other_leg = other_ax.get_legend() + if leg is None and other_leg is not None: + leg = other_leg + ax = other_ax + return ax, leg + + @final + @cache_readonly + def plt(self): + import matplotlib.pyplot as plt + + return plt + + _need_to_set_index = False + + @final + def _get_xticks(self): + index = self.data.index + is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time") + + # TODO: be stricter about x? + x: list[int] | np.ndarray + if self.use_index: + if isinstance(index, ABCPeriodIndex): + # test_mixed_freq_irreg_period + x = index.to_timestamp()._mpl_repr() + # TODO: why do we need to do to_timestamp() here but not other + # places where we call mpl_repr? + elif is_any_real_numeric_dtype(index.dtype): + # Matplotlib supports numeric values or datetime objects as + # xaxis values. Taking LBYL approach here, by the time + # matplotlib raises exception when using non numeric/datetime + # values for xaxis, several actions are already taken by plt. 
+ x = index._mpl_repr() + elif isinstance(index, ABCDatetimeIndex) or is_datetype: + x = index._mpl_repr() + else: + self._need_to_set_index = True + x = list(range(len(index))) + else: + x = list(range(len(index))) + + return x + + @classmethod + @register_pandas_matplotlib_converters + def _plot( + cls, ax: Axes, x, y: np.ndarray, style=None, is_errorbar: bool = False, **kwds + ): + mask = isna(y) + if mask.any(): + y = np.ma.array(y) + y = np.ma.masked_where(mask, y) + + if isinstance(x, ABCIndex): + x = x._mpl_repr() + + if is_errorbar: + if "xerr" in kwds: + kwds["xerr"] = np.array(kwds.get("xerr")) + if "yerr" in kwds: + kwds["yerr"] = np.array(kwds.get("yerr")) + return ax.errorbar(x, y, **kwds) + else: + # prevent style kwarg from going to errorbar, where it is unsupported + args = (x, y, style) if style is not None else (x, y) + return ax.plot(*args, **kwds) + + def _get_custom_index_name(self): + """Specify whether xlabel/ylabel should be used to override index name""" + return self.xlabel + + @final + def _get_index_name(self) -> str | None: + if isinstance(self.data.index, ABCMultiIndex): + name = self.data.index.names + if com.any_not_none(*name): + name = ",".join([pprint_thing(x) for x in name]) + else: + name = None + else: + name = self.data.index.name + if name is not None: + name = pprint_thing(name) + + # GH 45145, override the default axis label if one is provided. + index_name = self._get_custom_index_name() + if index_name is not None: + name = pprint_thing(index_name) + + return name + + @final + @classmethod + def _get_ax_layer(cls, ax, primary: bool = True): + """get left (primary) or right (secondary) axes""" + if primary: + return getattr(ax, "left_ax", ax) + else: + return getattr(ax, "right_ax", ax) + + @final + def _col_idx_to_axis_idx(self, col_idx: int) -> int: + """Return the index of the axis where the column at col_idx should be plotted""" + if isinstance(self.subplots, list): + # Subplots is a list: some columns will be grouped together in the same ax + return next( + group_idx + for (group_idx, group) in enumerate(self.subplots) + if col_idx in group + ) + else: + # subplots is True: one ax per column + return col_idx + + @final + def _get_ax(self, i: int): + # get the twinx ax if appropriate + if self.subplots: + i = self._col_idx_to_axis_idx(i) + ax = self.axes[i] + ax = self._maybe_right_yaxis(ax, i) + # error: Unsupported target for indexed assignment ("Sequence[Any]") + self.axes[i] = ax # type: ignore[index] + else: + ax = self.axes[0] + ax = self._maybe_right_yaxis(ax, i) + + ax.get_yaxis().set_visible(True) + return ax + + @final + def on_right(self, i: int): + if isinstance(self.secondary_y, bool): + return self.secondary_y + + if isinstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndex)): + return self.data.columns[i] in self.secondary_y + + @final + def _apply_style_colors( + self, colors, kwds: dict[str, Any], col_num: int, label: str + ): + """ + Manage style and color based on column number and its label. + Returns tuple of appropriate style and kwds which "color" may be added. 
+ """ + style = None + if self.style is not None: + if isinstance(self.style, list): + try: + style = self.style[col_num] + except IndexError: + pass + elif isinstance(self.style, dict): + style = self.style.get(label, style) + else: + style = self.style + + has_color = "color" in kwds or self.colormap is not None + nocolor_style = style is None or not _color_in_style(style) + if (has_color or self.subplots) and nocolor_style: + if isinstance(colors, dict): + kwds["color"] = colors[label] + else: + kwds["color"] = colors[col_num % len(colors)] + return style, kwds + + def _get_colors( + self, + num_colors: int | None = None, + color_kwds: str = "color", + ): + if num_colors is None: + num_colors = self.nseries + if color_kwds == "color": + color = self.color + else: + color = self.kwds.get(color_kwds) + return get_standard_colors( + num_colors=num_colors, + colormap=self.colormap, + color=color, + ) + + # TODO: tighter typing for first return? + @final + @staticmethod + def _parse_errorbars( + label: str, err, data: NDFrameT, nseries: int + ) -> tuple[Any, NDFrameT]: + """ + Look for error keyword arguments and return the actual errorbar data + or return the error DataFrame/dict + + Error bars can be specified in several ways: + Series: the user provides a pandas.Series object of the same + length as the data + ndarray: provides a np.ndarray of the same length as the data + DataFrame/dict: error values are paired with keys matching the + key in the plotted DataFrame + str: the name of the column within the plotted DataFrame + + Asymmetrical error bars are also supported, however raw error values + must be provided in this case. For a ``N`` length :class:`Series`, a + ``2xN`` array should be provided indicating lower and upper (or left + and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors + should be in a ``Mx2xN`` array. 
+ """ + if err is None: + return None, data + + def match_labels(data, e): + e = e.reindex(data.index) + return e + + # key-matched DataFrame + if isinstance(err, ABCDataFrame): + err = match_labels(data, err) + # key-matched dict + elif isinstance(err, dict): + pass + + # Series of error values + elif isinstance(err, ABCSeries): + # broadcast error series across data + err = match_labels(data, err) + err = np.atleast_2d(err) + err = np.tile(err, (nseries, 1)) + + # errors are a column in the dataframe + elif isinstance(err, str): + evalues = data[err].values + data = data[data.columns.drop(err)] + err = np.atleast_2d(evalues) + err = np.tile(err, (nseries, 1)) + + elif is_list_like(err): + if is_iterator(err): + err = np.atleast_2d(list(err)) + else: + # raw error values + err = np.atleast_2d(err) + + err_shape = err.shape + + # asymmetrical error bars + if isinstance(data, ABCSeries) and err_shape[0] == 2: + err = np.expand_dims(err, 0) + err_shape = err.shape + if err_shape[2] != len(data): + raise ValueError( + "Asymmetrical error bars should be provided " + f"with the shape (2, {len(data)})" + ) + elif isinstance(data, ABCDataFrame) and err.ndim == 3: + if ( + (err_shape[0] != nseries) + or (err_shape[1] != 2) + or (err_shape[2] != len(data)) + ): + raise ValueError( + "Asymmetrical error bars should be provided " + f"with the shape ({nseries}, 2, {len(data)})" + ) + + # broadcast errors to each data series + if len(err) == 1: + err = np.tile(err, (nseries, 1)) + + elif is_number(err): + err = np.tile( + [err], + (nseries, len(data)), + ) + + else: + msg = f"No valid {label} detected" + raise ValueError(msg) + + return err, data + + @final + def _get_errorbars( + self, label=None, index=None, xerr: bool = True, yerr: bool = True + ) -> dict[str, Any]: + errors = {} + + for kw, flag in zip(["xerr", "yerr"], [xerr, yerr]): + if flag: + err = self.errors[kw] + # user provided label-matched dataframe of errors + if isinstance(err, (ABCDataFrame, dict)): + if label is not None and label in err.keys(): + err = err[label] + else: + err = None + elif index is not None and err is not None: + err = err[index] + + if err is not None: + errors[kw] = err + return errors + + @final + def _get_subplots(self, fig: Figure): + if Version(mpl.__version__) < Version("3.8"): + from matplotlib.axes import Subplot as Klass + else: + from matplotlib.axes import Axes as Klass + + return [ + ax + for ax in fig.get_axes() + if (isinstance(ax, Klass) and ax.get_subplotspec() is not None) + ] + + @final + def _get_axes_layout(self, fig: Figure) -> tuple[int, int]: + axes = self._get_subplots(fig) + x_set = set() + y_set = set() + for ax in axes: + # check axes coordinates to estimate layout + points = ax.get_position().get_points() + x_set.add(points[0][0]) + y_set.add(points[0][1]) + return (len(y_set), len(x_set)) + + +class PlanePlot(MPLPlot, ABC): + """ + Abstract class for plotting on plane, currently scatter and hexbin. 
+ """ + + _layout_type = "single" + + def __init__(self, data, x, y, **kwargs) -> None: + MPLPlot.__init__(self, data, **kwargs) + if x is None or y is None: + raise ValueError(self._kind + " requires an x and y column") + if is_integer(x) and not self.data.columns._holds_integer(): + x = self.data.columns[x] + if is_integer(y) and not self.data.columns._holds_integer(): + y = self.data.columns[y] + + self.x = x + self.y = y + + @final + def _get_nseries(self, data: Series | DataFrame) -> int: + return 1 + + @final + def _post_plot_logic(self, ax: Axes, data) -> None: + x, y = self.x, self.y + xlabel = self.xlabel if self.xlabel is not None else pprint_thing(x) + ylabel = self.ylabel if self.ylabel is not None else pprint_thing(y) + # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible + # type "Hashable"; expected "str" + ax.set_xlabel(xlabel) # type: ignore[arg-type] + ax.set_ylabel(ylabel) # type: ignore[arg-type] + + @final + def _plot_colorbar(self, ax: Axes, *, fig: Figure, **kwds): + # Addresses issues #10611 and #10678: + # When plotting scatterplots and hexbinplots in IPython + # inline backend the colorbar axis height tends not to + # exactly match the parent axis height. + # The difference is due to small fractional differences + # in floating points with similar representation. + # To deal with this, this method forces the colorbar + # height to take the height of the parent axes. + # For a more detailed description of the issue + # see the following link: + # https://github.com/ipython/ipython/issues/11215 + + # GH33389, if ax is used multiple times, we should always + # use the last one which contains the latest information + # about the ax + img = ax.collections[-1] + return fig.colorbar(img, ax=ax, **kwds) + + +class ScatterPlot(PlanePlot): + @property + def _kind(self) -> Literal["scatter"]: + return "scatter" + + def __init__( + self, + data, + x, + y, + s=None, + c=None, + *, + colorbar: bool | lib.NoDefault = lib.no_default, + norm=None, + **kwargs, + ) -> None: + if s is None: + # hide the matplotlib default for size, in case we want to change + # the handling of this argument later + s = 20 + elif is_hashable(s) and s in data.columns: + s = data[s] + self.s = s + + self.colorbar = colorbar + self.norm = norm + + super().__init__(data, x, y, **kwargs) + if is_integer(c) and not self.data.columns._holds_integer(): + c = self.data.columns[c] + self.c = c + + def _make_plot(self, fig: Figure) -> None: + x, y, c, data = self.x, self.y, self.c, self.data + ax = self.axes[0] + + c_is_column = is_hashable(c) and c in self.data.columns + + color_by_categorical = c_is_column and isinstance( + self.data[c].dtype, CategoricalDtype + ) + + color = self.color + c_values = self._get_c_values(color, color_by_categorical, c_is_column) + norm, cmap = self._get_norm_and_cmap(c_values, color_by_categorical) + cb = self._get_colorbar(c_values, c_is_column) + + if self.legend: + label = self.label + else: + label = None + scatter = ax.scatter( + data[x].values, + data[y].values, + c=c_values, + label=label, + cmap=cmap, + norm=norm, + s=self.s, + **self.kwds, + ) + if cb: + cbar_label = c if c_is_column else "" + cbar = self._plot_colorbar(ax, fig=fig, label=cbar_label) + if color_by_categorical: + n_cats = len(self.data[c].cat.categories) + cbar.set_ticks(np.linspace(0.5, n_cats - 0.5, n_cats)) + cbar.ax.set_yticklabels(self.data[c].cat.categories) + + if label is not None: + self._append_legend_handles_labels( + # error: Argument 2 to "_append_legend_handles_labels" of + # 
"MPLPlot" has incompatible type "Hashable"; expected "str" + scatter, + label, # type: ignore[arg-type] + ) + + errors_x = self._get_errorbars(label=x, index=0, yerr=False) + errors_y = self._get_errorbars(label=y, index=0, xerr=False) + if len(errors_x) > 0 or len(errors_y) > 0: + err_kwds = dict(errors_x, **errors_y) + err_kwds["ecolor"] = scatter.get_facecolor()[0] + ax.errorbar(data[x].values, data[y].values, linestyle="none", **err_kwds) + + def _get_c_values(self, color, color_by_categorical: bool, c_is_column: bool): + c = self.c + if c is not None and color is not None: + raise TypeError("Specify exactly one of `c` and `color`") + if c is None and color is None: + c_values = self.plt.rcParams["patch.facecolor"] + elif color is not None: + c_values = color + elif color_by_categorical: + c_values = self.data[c].cat.codes + elif c_is_column: + c_values = self.data[c].values + else: + c_values = c + return c_values + + def _get_norm_and_cmap(self, c_values, color_by_categorical: bool): + c = self.c + if self.colormap is not None: + cmap = mpl.colormaps.get_cmap(self.colormap) + # cmap is only used if c_values are integers, otherwise UserWarning. + # GH-53908: additionally call isinstance() because is_integer_dtype + # returns True for "b" (meaning "blue" and not int8 in this context) + elif not isinstance(c_values, str) and is_integer_dtype(c_values): + # pandas uses colormap, matplotlib uses cmap. + cmap = mpl.colormaps["Greys"] + else: + cmap = None + + if color_by_categorical and cmap is not None: + from matplotlib import colors + + n_cats = len(self.data[c].cat.categories) + cmap = colors.ListedColormap([cmap(i) for i in range(cmap.N)]) + bounds = np.linspace(0, n_cats, n_cats + 1) + norm = colors.BoundaryNorm(bounds, cmap.N) + # TODO: warn that we are ignoring self.norm if user specified it? + # Doesn't happen in any tests 2023-11-09 + else: + norm = self.norm + return norm, cmap + + def _get_colorbar(self, c_values, c_is_column: bool) -> bool: + # plot colorbar if + # 1. colormap is assigned, and + # 2.`c` is a column containing only numeric values + plot_colorbar = self.colormap or c_is_column + cb = self.colorbar + if cb is lib.no_default: + return is_numeric_dtype(c_values) and plot_colorbar + return cb + + +class HexBinPlot(PlanePlot): + @property + def _kind(self) -> Literal["hexbin"]: + return "hexbin" + + def __init__(self, data, x, y, C=None, *, colorbar: bool = True, **kwargs) -> None: + super().__init__(data, x, y, **kwargs) + if is_integer(C) and not self.data.columns._holds_integer(): + C = self.data.columns[C] + self.C = C + + self.colorbar = colorbar + + # Scatter plot allows to plot objects data + if len(self.data[self.x]._get_numeric_data()) == 0: + raise ValueError(self._kind + " requires x column to be numeric") + if len(self.data[self.y]._get_numeric_data()) == 0: + raise ValueError(self._kind + " requires y column to be numeric") + + def _make_plot(self, fig: Figure) -> None: + x, y, data, C = self.x, self.y, self.data, self.C + ax = self.axes[0] + # pandas uses colormap, matplotlib uses cmap. 
+ cmap = self.colormap or "BuGn" + cmap = mpl.colormaps.get_cmap(cmap) + cb = self.colorbar + + if C is None: + c_values = None + else: + c_values = data[C].values + + ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap, **self.kwds) + if cb: + self._plot_colorbar(ax, fig=fig) + + def _make_legend(self) -> None: + pass + + +class LinePlot(MPLPlot): + _default_rot = 0 + + @property + def orientation(self) -> PlottingOrientation: + return "vertical" + + @property + def _kind(self) -> Literal["line", "area", "hist", "kde", "box"]: + return "line" + + def __init__(self, data, **kwargs) -> None: + from pandas.plotting import plot_params + + MPLPlot.__init__(self, data, **kwargs) + if self.stacked: + self.data = self.data.fillna(value=0) + self.x_compat = plot_params["x_compat"] + if "x_compat" in self.kwds: + self.x_compat = bool(self.kwds.pop("x_compat")) + + @final + def _is_ts_plot(self) -> bool: + # this is slightly deceptive + return not self.x_compat and self.use_index and self._use_dynamic_x() + + @final + def _use_dynamic_x(self) -> bool: + return use_dynamic_x(self._get_ax(0), self.data) + + def _make_plot(self, fig: Figure) -> None: + if self._is_ts_plot(): + data = maybe_convert_index(self._get_ax(0), self.data) + + x = data.index # dummy, not used + plotf = self._ts_plot + it = data.items() + else: + x = self._get_xticks() + # error: Incompatible types in assignment (expression has type + # "Callable[[Any, Any, Any, Any, Any, Any, KwArg(Any)], Any]", variable has + # type "Callable[[Any, Any, Any, Any, KwArg(Any)], Any]") + plotf = self._plot # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type + # "Iterator[tuple[Hashable, ndarray[Any, Any]]]", variable has + # type "Iterable[tuple[Hashable, Series]]") + it = self._iter_data(data=self.data) # type: ignore[assignment] + + stacking_id = self._get_stacking_id() + is_errorbar = com.any_not_none(*self.errors.values()) + + colors = self._get_colors() + for i, (label, y) in enumerate(it): + ax = self._get_ax(i) + kwds = self.kwds.copy() + if self.color is not None: + kwds["color"] = self.color + style, kwds = self._apply_style_colors( + colors, + kwds, + i, + # error: Argument 4 to "_apply_style_colors" of "MPLPlot" has + # incompatible type "Hashable"; expected "str" + label, # type: ignore[arg-type] + ) + + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) + + label = pprint_thing(label) + label = self._mark_right_label(label, index=i) + kwds["label"] = label + + newlines = plotf( + ax, + x, + y, + style=style, + column_num=i, + stacking_id=stacking_id, + is_errorbar=is_errorbar, + **kwds, + ) + self._append_legend_handles_labels(newlines[0], label) + + if self._is_ts_plot(): + # reset of xlim should be used for ts data + # TODO: GH28021, should find a way to change view limit on xaxis + lines = get_all_lines(ax) + left, right = get_xlim(lines) + ax.set_xlim(left, right) + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y: np.ndarray, + style=None, + column_num=None, + stacking_id=None, + **kwds, + ): + # column_num is used to get the target column from plotf in line and + # area plots + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(y)) + y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"]) + lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds) + cls._update_stacker(ax, stacking_id, y) + return lines + + @final + 
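# time-series path: resample to the axis frequency, decorate the axes, then format the date axis +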
def _ts_plot(self, ax: Axes, x, data: Series, style=None, **kwds): + # accept x to be consistent with normal plot func, + # x is not passed to tsplot as it uses data.index as x coordinate + # column_num must be in kwds for stacking purpose + freq, data = maybe_resample(data, ax, kwds) + + # Set ax with freq info + decorate_axes(ax, freq) + # digging deeper + if hasattr(ax, "left_ax"): + decorate_axes(ax.left_ax, freq) + if hasattr(ax, "right_ax"): + decorate_axes(ax.right_ax, freq) + # TODO #54485 + ax._plot_data.append((data, self._kind, kwds)) # type: ignore[attr-defined] + + lines = self._plot(ax, data.index, np.asarray(data.values), style=style, **kwds) + # set date formatter, locators and rescale limits + # TODO #54485 + format_dateaxis(ax, ax.freq, data.index) # type: ignore[arg-type, attr-defined] + return lines + + @final + def _get_stacking_id(self) -> int | None: + if self.stacked: + return id(self.data) + else: + return None + + @final + @classmethod + def _initialize_stacker(cls, ax: Axes, stacking_id, n: int) -> None: + if stacking_id is None: + return + if not hasattr(ax, "_stacker_pos_prior"): + # TODO #54485 + ax._stacker_pos_prior = {} # type: ignore[attr-defined] + if not hasattr(ax, "_stacker_neg_prior"): + # TODO #54485 + ax._stacker_neg_prior = {} # type: ignore[attr-defined] + # TODO #54485 + ax._stacker_pos_prior[stacking_id] = np.zeros(n) # type: ignore[attr-defined] + # TODO #54485 + ax._stacker_neg_prior[stacking_id] = np.zeros(n) # type: ignore[attr-defined] + + @final + @classmethod + def _get_stacked_values( + cls, ax: Axes, stacking_id: int | None, values: np.ndarray, label + ) -> np.ndarray: + if stacking_id is None: + return values + if not hasattr(ax, "_stacker_pos_prior"): + # stacker may not be initialized for subplots + cls._initialize_stacker(ax, stacking_id, len(values)) + + if (values >= 0).all(): + # TODO #54485 + return ( + ax._stacker_pos_prior[stacking_id] # type: ignore[attr-defined] + + values + ) + elif (values <= 0).all(): + # TODO #54485 + return ( + ax._stacker_neg_prior[stacking_id] # type: ignore[attr-defined] + + values + ) + + raise ValueError( + "When stacked is True, each column must be either " + "all positive or all negative. " + f"Column '{label}' contains both positive and negative values" + ) + + @final + @classmethod + def _update_stacker(cls, ax: Axes, stacking_id: int | None, values) -> None: + if stacking_id is None: + return + if (values >= 0).all(): + # TODO #54485 + ax._stacker_pos_prior[stacking_id] += values # type: ignore[attr-defined] + elif (values <= 0).all(): + # TODO #54485 + ax._stacker_neg_prior[stacking_id] += values # type: ignore[attr-defined] + + def _post_plot_logic(self, ax: Axes, data) -> None: + from matplotlib.ticker import FixedLocator + + def get_label(i): + if is_float(i) and i.is_integer(): + i = int(i) + try: + return pprint_thing(data.index[i]) + except Exception: + return "" + + if self._need_to_set_index: + xticks = ax.get_xticks() + xticklabels = [get_label(x) for x in xticks] + # error: Argument 1 to "FixedLocator" has incompatible type "ndarray[Any, + # Any]"; expected "Sequence[float]" + ax.xaxis.set_major_locator(FixedLocator(xticks)) # type: ignore[arg-type] + ax.set_xticklabels(xticklabels) + + # If the index is an irregular time series, then by default + # we rotate the tick labels. The exception is if there are + # subplots which don't share their x-axes, in which we case + # we don't rotate the ticklabels as by default the subplots + # would be too close together. 
+ condition = ( + not self._use_dynamic_x() + and (data.index._is_all_dates and self.use_index) + and (not self.subplots or (self.subplots and self.sharex)) + ) + + index_name = self._get_index_name() + + if condition: + # irregular TS rotated 30 deg. by default + # probably a better place to check / set this. + if not self._rot_set: + self.rot = 30 + format_date_labels(ax, rot=self.rot) + + if index_name is not None and self.use_index: + ax.set_xlabel(index_name) + + +class AreaPlot(LinePlot): + @property + def _kind(self) -> Literal["area"]: + return "area" + + def __init__(self, data, **kwargs) -> None: + kwargs.setdefault("stacked", True) + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + data = data.fillna(value=0) + LinePlot.__init__(self, data, **kwargs) + + if not self.stacked: + # use smaller alpha to distinguish overlap + self.kwds.setdefault("alpha", 0.5) + + if self.logy or self.loglog: + raise ValueError("Log-y scales are not supported in area plot") + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y: np.ndarray, + style=None, + column_num=None, + stacking_id=None, + is_errorbar: bool = False, + **kwds, + ): + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(y)) + y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"]) + + # need to remove label, because subplots uses mpl legend as it is + line_kwds = kwds.copy() + line_kwds.pop("label") + lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds) + + # get data from the line to get coordinates for fill_between + xdata, y_values = lines[0].get_data(orig=False) + + # unable to use ``_get_stacked_values`` here to get starting point + if stacking_id is None: + start = np.zeros(len(y)) + elif (y >= 0).all(): + # TODO #54485 + start = ax._stacker_pos_prior[stacking_id] # type: ignore[attr-defined] + elif (y <= 0).all(): + # TODO #54485 + start = ax._stacker_neg_prior[stacking_id] # type: ignore[attr-defined] + else: + start = np.zeros(len(y)) + + if "color" not in kwds: + kwds["color"] = lines[0].get_color() + + rect = ax.fill_between(xdata, start, y_values, **kwds) + cls._update_stacker(ax, stacking_id, y) + + # LinePlot expects list of artists + res = [rect] + return res + + def _post_plot_logic(self, ax: Axes, data) -> None: + LinePlot._post_plot_logic(self, ax, data) + + is_shared_y = len(list(ax.get_shared_y_axes())) > 0 + # do not override the default axis behaviour in case of shared y axes + if self.ylim is None and not is_shared_y: + if (data >= 0).all().all(): + ax.set_ylim(0, None) + elif (data <= 0).all().all(): + ax.set_ylim(None, 0) + + +class BarPlot(MPLPlot): + @property + def _kind(self) -> Literal["bar", "barh"]: + return "bar" + + _default_rot = 90 + + @property + def orientation(self) -> PlottingOrientation: + return "vertical" + + def __init__( + self, + data, + *, + align="center", + bottom=0, + left=0, + width=0.5, + position=0.5, + log=False, + **kwargs, + ) -> None: + # we have to treat a series differently than a + # 1-column DataFrame w.r.t. 
color handling + self._is_series = isinstance(data, ABCSeries) + self.bar_width = width + self._align = align + self._position = position + self.tick_pos = np.arange(len(data)) + + if is_list_like(bottom): + bottom = np.array(bottom) + if is_list_like(left): + left = np.array(left) + self.bottom = bottom + self.left = left + + self.log = log + + MPLPlot.__init__(self, data, **kwargs) + + @cache_readonly + def ax_pos(self) -> np.ndarray: + return self.tick_pos - self.tickoffset + + @cache_readonly + def tickoffset(self): + if self.stacked or self.subplots: + return self.bar_width * self._position + elif self._align == "edge": + w = self.bar_width / self.nseries + return self.bar_width * (self._position - 0.5) + w * 0.5 + else: + return self.bar_width * self._position + + @cache_readonly + def lim_offset(self): + if self.stacked or self.subplots: + if self._align == "edge": + return self.bar_width / 2 + else: + return 0 + elif self._align == "edge": + w = self.bar_width / self.nseries + return w * 0.5 + else: + return 0 + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y: np.ndarray, + w, + start: int | npt.NDArray[np.intp] = 0, + log: bool = False, + **kwds, + ): + return ax.bar(x, y, w, bottom=start, log=log, **kwds) + + @property + def _start_base(self): + return self.bottom + + def _make_plot(self, fig: Figure) -> None: + colors = self._get_colors() + ncolors = len(colors) + + pos_prior = neg_prior = np.zeros(len(self.data)) + K = self.nseries + + data = self.data.fillna(0) + for i, (label, y) in enumerate(self._iter_data(data=data)): + ax = self._get_ax(i) + kwds = self.kwds.copy() + if self._is_series: + kwds["color"] = colors + elif isinstance(colors, dict): + kwds["color"] = colors[label] + else: + kwds["color"] = colors[i % ncolors] + + errors = self._get_errorbars(label=label, index=i) + kwds = dict(kwds, **errors) + + label = pprint_thing(label) + label = self._mark_right_label(label, index=i) + + if (("yerr" in kwds) or ("xerr" in kwds)) and (kwds.get("ecolor") is None): + kwds["ecolor"] = mpl.rcParams["xtick.color"] + + start = 0 + if self.log and (y >= 1).all(): + start = 1 + start = start + self._start_base + + kwds["align"] = self._align + if self.subplots: + w = self.bar_width / 2 + rect = self._plot( + ax, + self.ax_pos + w, + y, + self.bar_width, + start=start, + label=label, + log=self.log, + **kwds, + ) + ax.set_title(label) + elif self.stacked: + mask = y > 0 + start = np.where(mask, pos_prior, neg_prior) + self._start_base + w = self.bar_width / 2 + rect = self._plot( + ax, + self.ax_pos + w, + y, + self.bar_width, + start=start, + label=label, + log=self.log, + **kwds, + ) + pos_prior = pos_prior + np.where(mask, y, 0) + neg_prior = neg_prior + np.where(mask, 0, y) + else: + w = self.bar_width / K + rect = self._plot( + ax, + self.ax_pos + (i + 0.5) * w, + y, + w, + start=start, + label=label, + log=self.log, + **kwds, + ) + self._append_legend_handles_labels(rect, label) + + def _post_plot_logic(self, ax: Axes, data) -> None: + if self.use_index: + str_index = [pprint_thing(key) for key in data.index] + else: + str_index = [pprint_thing(key) for key in range(data.shape[0])] + + s_edge = self.ax_pos[0] - 0.25 + self.lim_offset + e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset + + self._decorate_ticks(ax, self._get_index_name(), str_index, s_edge, e_edge) + + def _decorate_ticks( + self, + ax: Axes, + name: str | None, + ticklabels: list[str], + 
start_edge: float, + end_edge: float, + ) -> None: + ax.set_xlim((start_edge, end_edge)) + + if self.xticks is not None: + ax.set_xticks(np.array(self.xticks)) + else: + ax.set_xticks(self.tick_pos) + ax.set_xticklabels(ticklabels) + + if name is not None and self.use_index: + ax.set_xlabel(name) + + +class BarhPlot(BarPlot): + @property + def _kind(self) -> Literal["barh"]: + return "barh" + + _default_rot = 0 + + @property + def orientation(self) -> Literal["horizontal"]: + return "horizontal" + + @property + def _start_base(self): + return self.left + + # error: Signature of "_plot" incompatible with supertype "MPLPlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + x, + y: np.ndarray, + w, + start: int | npt.NDArray[np.intp] = 0, + log: bool = False, + **kwds, + ): + return ax.barh(x, y, w, left=start, log=log, **kwds) + + def _get_custom_index_name(self): + return self.ylabel + + def _decorate_ticks( + self, + ax: Axes, + name: str | None, + ticklabels: list[str], + start_edge: float, + end_edge: float, + ) -> None: + # horizontal bars + ax.set_ylim((start_edge, end_edge)) + ax.set_yticks(self.tick_pos) + ax.set_yticklabels(ticklabels) + if name is not None and self.use_index: + ax.set_ylabel(name) + # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible type + # "Hashable | None"; expected "str" + ax.set_xlabel(self.xlabel) # type: ignore[arg-type] + + +class PiePlot(MPLPlot): + @property + def _kind(self) -> Literal["pie"]: + return "pie" + + _layout_type = "horizontal" + + def __init__(self, data, kind=None, **kwargs) -> None: + data = data.fillna(value=0) + if (data < 0).any().any(): + raise ValueError(f"{self._kind} plot doesn't allow negative values") + MPLPlot.__init__(self, data, kind=kind, **kwargs) + + @classmethod + def _validate_log_kwd( + cls, + kwd: str, + value: bool | None | Literal["sym"], + ) -> bool | None | Literal["sym"]: + super()._validate_log_kwd(kwd=kwd, value=value) + if value is not False: + warnings.warn( + f"PiePlot ignores the '{kwd}' keyword", + UserWarning, + stacklevel=find_stack_level(), + ) + return False + + def _validate_color_args(self, color, colormap) -> None: + # TODO: warn if color is passed and ignored? 
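+ # pie wedge colors are supplied via the "colors" kwarg in _make_plot instead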
+        return None
+
+    def _make_plot(self, fig: Figure) -> None:
+        colors = self._get_colors(num_colors=len(self.data), color_kwds="colors")
+        self.kwds.setdefault("colors", colors)
+
+        for i, (label, y) in enumerate(self._iter_data(data=self.data)):
+            ax = self._get_ax(i)
+            if label is not None:
+                label = pprint_thing(label)
+                ax.set_ylabel(label)
+
+            kwds = self.kwds.copy()
+
+            def blank_labeler(label, value):
+                if value == 0:
+                    return ""
+                else:
+                    return label
+
+            idx = [pprint_thing(v) for v in self.data.index]
+            labels = kwds.pop("labels", idx)
+            # labels is used for each wedge's labels
+            # Blank out labels for values of 0 so they don't overlap
+            # with nonzero wedges
+            if labels is not None:
+                blabels = [blank_labeler(left, value) for left, value in zip(labels, y)]
+            else:
+                blabels = None
+            results = ax.pie(y, labels=blabels, **kwds)
+
+            if kwds.get("autopct", None) is not None:
+                patches, texts, autotexts = results
+            else:
+                patches, texts = results
+                autotexts = []
+
+            if self.fontsize is not None:
+                for t in texts + autotexts:
+                    t.set_fontsize(self.fontsize)
+
+            # leglabels is used for legend labels
+            leglabels = labels if labels is not None else idx
+            for _patch, _leglabel in zip(patches, leglabels):
+                self._append_legend_handles_labels(_patch, _leglabel)
+
+    def _post_plot_logic(self, ax: Axes, data) -> None:
+        pass
diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/groupby.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/groupby.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbb66065a8039c63b7181619aea3aa74277da4a5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/groupby.py
@@ -0,0 +1,142 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas.core.dtypes.missing import remove_na_arraylike
+
+from pandas import (
+    MultiIndex,
+    concat,
+)
+
+from pandas.plotting._matplotlib.misc import unpack_single_str_list
+
+if TYPE_CHECKING:
+    from collections.abc import Hashable
+
+    from pandas._typing import IndexLabel
+
+    from pandas import (
+        DataFrame,
+        Series,
+    )
+
+
+def create_iter_data_given_by(
+    data: DataFrame, kind: str = "hist"
+) -> dict[Hashable, DataFrame | Series]:
+    """
+    Create data for iteration depending on whether `by` is assigned; used
+    only for hist and box plots.
+
+    If `by` is assigned, return a dictionary of DataFrames in which the keys
+    of the dictionary are the group values.
+    If `by` is not assigned, return the input as is, which preserves the
+    current status of iter_data.
+
+    Parameters
+    ----------
+    data : reformatted grouped data from `_compute_plot_data` method.
+    kind : str, plot kind. This function is only used for `hist` and `box` plots.
+
+    Returns
+    -------
+    iter_data : DataFrame or Dictionary of DataFrames
+
+    Examples
+    --------
+    If `by` is assigned:
+
+    >>> import numpy as np
+    >>> tuples = [('h1', 'a'), ('h1', 'b'), ('h2', 'a'), ('h2', 'b')]
+    >>> mi = pd.MultiIndex.from_tuples(tuples)
+    >>> value = [[1, 3, np.nan, np.nan],
+    ...          [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]]
+    >>> data = pd.DataFrame(value, columns=mi)
+    >>> create_iter_data_given_by(data)
+    {'h1':     h1
+         a    b
+    0  1.0  3.0
+    1  3.0  4.0
+    2  NaN  NaN, 'h2':     h2
+         a    b
+    0  NaN  NaN
+    1  NaN  NaN
+    2  5.0  6.0}
+    """
+
+    # For `hist` plot, before transformation, the values in level 0 are values
+    # in groups and subplot titles, and later used for column subselection and
+    # iteration; for `box` plot, values in level 1 are column names to show,
+    # and are used for iteration and as subplot titles.
+    if kind == "hist":
+        level = 0
+    else:
+        level = 1
+
+    # Select sub-columns based on the value of level of MI, and if `by` is
+    # assigned, data must be a MI DataFrame
+    assert isinstance(data.columns, MultiIndex)
+    return {
+        col: data.loc[:, data.columns.get_level_values(level) == col]
+        for col in data.columns.levels[level]
+    }
+
+
+def reconstruct_data_with_by(
+    data: DataFrame, by: IndexLabel, cols: IndexLabel
+) -> DataFrame:
+    """
+    Internal function to group data, and reassign multiindex column names onto the
+    result in order to let grouped data be used in the _compute_plot_data method.
+
+    Parameters
+    ----------
+    data : Original DataFrame to plot
+    by : grouped `by` parameter selected by users
+    cols : columns of data set (excluding columns used in `by`)
+
+    Returns
+    -------
+    Output is the reconstructed DataFrame with MultiIndex columns. The first level
+    of the MI is the unique values of the groups, and the second level of the MI
+    is the columns selected by users.
+
+    Examples
+    --------
+    >>> d = {'h': ['h1', 'h1', 'h2'], 'a': [1, 3, 5], 'b': [3, 4, 6]}
+    >>> df = pd.DataFrame(d)
+    >>> reconstruct_data_with_by(df, by='h', cols=['a', 'b'])
+        h1            h2
+         a    b        a    b
+    0  1.0  3.0      NaN  NaN
+    1  3.0  4.0      NaN  NaN
+    2  NaN  NaN      5.0  6.0
+    """
+    by_modified = unpack_single_str_list(by)
+    grouped = data.groupby(by_modified)
+
+    data_list = []
+    for key, group in grouped:
+        # error: List item 1 has incompatible type "Union[Hashable,
+        # Sequence[Hashable]]"; expected "Iterable[Hashable]"
+        columns = MultiIndex.from_product([[key], cols])  # type: ignore[list-item]
+        sub_group = group[cols]
+        sub_group.columns = columns
+        data_list.append(sub_group)
+
+    data = concat(data_list, axis=1)
+    return data
+
+
+def reformat_hist_y_given_by(y: np.ndarray, by: IndexLabel | None) -> np.ndarray:
+    """Internal function to reformat y for a hist plot, depending on whether
+    `by` is applied.
+
+    If `by` is None, the input y is 1-d and NaN values are removed; if `by` is
+    not None, a groupby has taken place and the input y is a multi-dimensional
+    array.
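+
+    A minimal sketch of the ungrouped case (assuming a plain float ndarray):
+
+    >>> import numpy as np
+    >>> reformat_hist_y_given_by(np.array([1.0, np.nan, 3.0]), None)
+    array([1., 3.])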
+ """ + if by is not None and len(y.shape) > 1: + return np.array([remove_na_arraylike(col) for col in y.T]).T + return remove_na_arraylike(y) diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/hist.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/hist.py new file mode 100644 index 0000000000000000000000000000000000000000..e610f1adb602c46ffd7affa50c0f857ad7d030e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/hist.py @@ -0,0 +1,581 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Literal, + final, +) + +import numpy as np + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, +) +from pandas.core.dtypes.missing import ( + isna, + remove_na_arraylike, +) + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.core import ( + LinePlot, + MPLPlot, +) +from pandas.plotting._matplotlib.groupby import ( + create_iter_data_given_by, + reformat_hist_y_given_by, +) +from pandas.plotting._matplotlib.misc import unpack_single_str_list +from pandas.plotting._matplotlib.tools import ( + create_subplots, + flatten_axes, + maybe_adjust_figure, + set_ticks_props, +) + +if TYPE_CHECKING: + from matplotlib.axes import Axes + from matplotlib.figure import Figure + + from pandas._typing import PlottingOrientation + + from pandas import ( + DataFrame, + Series, + ) + + +class HistPlot(LinePlot): + @property + def _kind(self) -> Literal["hist", "kde"]: + return "hist" + + def __init__( + self, + data, + bins: int | np.ndarray | list[np.ndarray] = 10, + bottom: int | np.ndarray = 0, + *, + range=None, + weights=None, + **kwargs, + ) -> None: + if is_list_like(bottom): + bottom = np.array(bottom) + self.bottom = bottom + + self._bin_range = range + self.weights = weights + + self.xlabel = kwargs.get("xlabel") + self.ylabel = kwargs.get("ylabel") + # Do not call LinePlot.__init__ which may fill nan + MPLPlot.__init__(self, data, **kwargs) # pylint: disable=non-parent-init-called + + self.bins = self._adjust_bins(bins) + + def _adjust_bins(self, bins: int | np.ndarray | list[np.ndarray]): + if is_integer(bins): + if self.by is not None: + by_modified = unpack_single_str_list(self.by) + grouped = self.data.groupby(by_modified)[self.columns] + bins = [self._calculate_bins(group, bins) for key, group in grouped] + else: + bins = self._calculate_bins(self.data, bins) + return bins + + def _calculate_bins(self, data: Series | DataFrame, bins) -> np.ndarray: + """Calculate bins given data""" + nd_values = data.infer_objects(copy=False)._get_numeric_data() + values = np.ravel(nd_values) + values = values[~isna(values)] + + hist, bins = np.histogram(values, bins=bins, range=self._bin_range) + return bins + + # error: Signature of "_plot" incompatible with supertype "LinePlot" + @classmethod + def _plot( # type: ignore[override] + cls, + ax: Axes, + y: np.ndarray, + style=None, + bottom: int | np.ndarray = 0, + column_num: int = 0, + stacking_id=None, + *, + bins, + **kwds, + ): + if column_num == 0: + cls._initialize_stacker(ax, stacking_id, len(bins) - 1) + + base = np.zeros(len(bins) - 1) + bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds["label"]) + # ignore style + n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds) + cls._update_stacker(ax, stacking_id, n) + return patches + + def _make_plot(self, fig: Figure) -> None: + colors = self._get_colors() 
+        stacking_id = self._get_stacking_id()
+
+        # Re-create iterated data if `by` is assigned by users
+        data = (
+            create_iter_data_given_by(self.data, self._kind)
+            if self.by is not None
+            else self.data
+        )
+
+        # error: Argument "data" to "_iter_data" of "MPLPlot" has incompatible
+        # type "object"; expected "DataFrame | dict[Hashable, Series | DataFrame]"
+        for i, (label, y) in enumerate(self._iter_data(data=data)):  # type: ignore[arg-type]
+            ax = self._get_ax(i)
+
+            kwds = self.kwds.copy()
+            if self.color is not None:
+                kwds["color"] = self.color
+
+            label = pprint_thing(label)
+            label = self._mark_right_label(label, index=i)
+            kwds["label"] = label
+
+            style, kwds = self._apply_style_colors(colors, kwds, i, label)
+            if style is not None:
+                kwds["style"] = style
+
+            self._make_plot_keywords(kwds, y)
+
+            # `bins` is now a multi-dimensional array and each plot needs only
+            # its own 1-d slice; when `by` is applied, the label should be the
+            # grouped columns
+            if self.by is not None:
+                kwds["bins"] = kwds["bins"][i]
+                kwds["label"] = self.columns
+                kwds.pop("color")
+
+            if self.weights is not None:
+                kwds["weights"] = type(self)._get_column_weights(self.weights, i, y)
+
+            y = reformat_hist_y_given_by(y, self.by)
+
+            artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds)
+
+            # when by is applied, show the title on each subplot so the group
+            # it belongs to is visible
+            if self.by is not None:
+                ax.set_title(pprint_thing(label))
+
+            self._append_legend_handles_labels(artists[0], label)
+
+    def _make_plot_keywords(self, kwds: dict[str, Any], y: np.ndarray) -> None:
+        """merge BoxPlot/KdePlot properties to passed kwds"""
+        # y is required for KdePlot
+        kwds["bottom"] = self.bottom
+        kwds["bins"] = self.bins
+
+    @final
+    @staticmethod
+    def _get_column_weights(weights, i: int, y):
+        # We allow weights to be a multi-dimensional array, e.g. a (10, 2) array,
+        # and each sub-array (10,) will be called in each iteration. If users only
+        # provide a 1D array, we assume the same weights are used for all iterations
+        if weights is not None:
+            if np.ndim(weights) != 1 and np.shape(weights)[-1] != 1:
+                try:
+                    weights = weights[:, i]
+                except IndexError as err:
+                    raise ValueError(
+                        "weights must have the same shape as data, "
+                        "or be a single column"
+                    ) from err
+            weights = weights[~isna(y)]
+        return weights
+
+    def _post_plot_logic(self, ax: Axes, data) -> None:
+        if self.orientation == "horizontal":
+            # error: Argument 1 to "set_xlabel" of "_AxesBase" has incompatible
+            # type "Hashable"; expected "str"
+            ax.set_xlabel(
+                "Frequency"
+                if self.xlabel is None
+                else self.xlabel  # type: ignore[arg-type]
+            )
+            ax.set_ylabel(self.ylabel)  # type: ignore[arg-type]
+        else:
+            ax.set_xlabel(self.xlabel)  # type: ignore[arg-type]
+            ax.set_ylabel(
+                "Frequency"
+                if self.ylabel is None
+                else self.ylabel  # type: ignore[arg-type]
+            )
+
+    @property
+    def orientation(self) -> PlottingOrientation:
+        if self.kwds.get("orientation", None) == "horizontal":
+            return "horizontal"
+        else:
+            return "vertical"
+
+
+class KdePlot(HistPlot):
+    @property
+    def _kind(self) -> Literal["kde"]:
+        return "kde"
+
+    @property
+    def orientation(self) -> Literal["vertical"]:
+        return "vertical"
+
+    def __init__(
+        self, data, bw_method=None, ind=None, *, weights=None, **kwargs
+    ) -> None:
+        # Do not call LinePlot.__init__ which may fill nan
+        MPLPlot.__init__(self, data, **kwargs)  # pylint: disable=non-parent-init-called
+        self.bw_method = bw_method
+        self.ind = ind
+        self.weights = weights
+
+    @staticmethod
+    def _get_ind(y: np.ndarray, ind):
+        if ind is None:
+            # np.nanmax() and np.nanmin() ignore the missing values
+            sample_range = np.nanmax(y) - np.nanmin(y)
+            ind = np.linspace(
+                np.nanmin(y) - 0.5 * sample_range,
+                np.nanmax(y) + 0.5 * sample_range,
+                1000,
+            )
+        elif is_integer(ind):
+            sample_range = np.nanmax(y) - np.nanmin(y)
+            ind = np.linspace(
+                np.nanmin(y) - 0.5 * sample_range,
+                np.nanmax(y) + 0.5 * sample_range,
+                ind,
+            )
+        return ind
+
+    @classmethod
+    # error: Signature of "_plot" incompatible with supertype "MPLPlot"
+    def _plot(  # type: ignore[override]
+        cls,
+        ax: Axes,
+        y: np.ndarray,
+        style=None,
+        bw_method=None,
+        ind=None,
+        column_num=None,
+        stacking_id: int | None = None,
+        **kwds,
+    ):
+        from scipy.stats import gaussian_kde
+
+        y = remove_na_arraylike(y)
+        gkde = gaussian_kde(y, bw_method=bw_method)
+
+        y = gkde.evaluate(ind)
+        lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
+        return lines
+
+    def _make_plot_keywords(self, kwds: dict[str, Any], y: np.ndarray) -> None:
+        kwds["bw_method"] = self.bw_method
+        kwds["ind"] = type(self)._get_ind(y, ind=self.ind)
+
+    def _post_plot_logic(self, ax: Axes, data) -> None:
+        ax.set_ylabel("Density")
+
+
+def _grouped_plot(
+    plotf,
+    data: Series | DataFrame,
+    column=None,
+    by=None,
+    numeric_only: bool = True,
+    figsize: tuple[float, float] | None = None,
+    sharex: bool = True,
+    sharey: bool = True,
+    layout=None,
+    rot: float = 0,
+    ax=None,
+    **kwargs,
+):
+    # error: Non-overlapping equality check (left operand type: "Optional[Tuple[float,
+    # float]]", right operand type: "Literal['default']")
+    if figsize == "default":  # type: ignore[comparison-overlap]
+        # allowed to specify mpl default with 'default'
+        raise ValueError(
+            "figsize='default' is no longer supported. "
+            "Specify figure size by tuple instead"
+        )
+
+    grouped = data.groupby(by)
+    if column is not None:
+        grouped = grouped[column]
+
+    naxes = len(grouped)
+    fig, axes = create_subplots(
+        naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax, layout=layout
+    )
+
+    _axes = flatten_axes(axes)
+
+    for i, (key, group) in enumerate(grouped):
+        ax = _axes[i]
+        if numeric_only and isinstance(group, ABCDataFrame):
+            group = group._get_numeric_data()
+        plotf(group, ax, **kwargs)
+        ax.set_title(pprint_thing(key))
+
+    return fig, axes
+
+
+def _grouped_hist(
+    data: Series | DataFrame,
+    column=None,
+    by=None,
+    ax=None,
+    bins: int = 50,
+    figsize: tuple[float, float] | None = None,
+    layout=None,
+    sharex: bool = False,
+    sharey: bool = False,
+    rot: float = 90,
+    grid: bool = True,
+    xlabelsize: int | None = None,
+    xrot=None,
+    ylabelsize: int | None = None,
+    yrot=None,
+    legend: bool = False,
+    **kwargs,
+):
+    """
+    Grouped histogram
+
+    Parameters
+    ----------
+    data : Series/DataFrame
+    column : object, optional
+    by : object, optional
+    ax : axes, optional
+    bins : int, default 50
+    figsize : tuple, optional
+    layout : optional
+    sharex : bool, default False
+    sharey : bool, default False
+    rot : float, default 90
+    grid : bool, default True
+    legend : bool, default False
+    kwargs : dict, keyword arguments passed to matplotlib.Axes.hist
+
+    Returns
+    -------
+    collection of Matplotlib Axes
+    """
+    if legend:
+        assert "label" not in kwargs
+        if data.ndim == 1:
+            kwargs["label"] = data.name
+        elif column is None:
+            kwargs["label"] = data.columns
+        else:
+            kwargs["label"] = column
+
+    def plot_group(group, ax) -> None:
+        ax.hist(group.dropna().values, bins=bins, **kwargs)
+        if legend:
+            ax.legend()
+
+    if xrot is None:
+        xrot = rot
+
+    fig, axes = _grouped_plot(
+        plot_group,
+        data,
+        column=column,
+        by=by,
+        sharex=sharex,
+        sharey=sharey,
+        ax=ax,
+        figsize=figsize,
+        layout=layout,
+        rot=rot,
+    )
+
+    set_ticks_props(
+        axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot
+    )
+
+    maybe_adjust_figure(
+        fig, bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3
+    )
+    return axes
+
+
+def hist_series(
+    self: Series,
+    by=None,
+    ax=None,
+    grid: bool = True,
+    xlabelsize: int | None = None,
+    xrot=None,
+    ylabelsize: int | None = None,
+    yrot=None,
+    figsize: tuple[float, float] | None = None,
+    bins: int = 10,
+    legend: bool = False,
+    **kwds,
+):
+    import matplotlib.pyplot as plt
+
+    if legend and "label" in kwds:
+        raise ValueError("Cannot use both legend and label")
+
+    if by is None:
+        if kwds.get("layout", None) is not None:
+            raise ValueError("The 'layout' keyword is not supported when 'by' is None")
+        # hack until the plotting interface is a bit more unified
+        fig = kwds.pop(
+            "figure", plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize)
+        )
+        if figsize is not None and tuple(figsize) != tuple(fig.get_size_inches()):
+            fig.set_size_inches(*figsize, forward=True)
+        if ax is None:
+            ax = fig.gca()
+        elif ax.get_figure() != fig:
+            raise AssertionError("passed axis not bound to passed figure")
+        values = self.dropna().values
+        if legend:
+            kwds["label"] = self.name
+        ax.hist(values, bins=bins, **kwds)
+        if legend:
+            ax.legend()
+        ax.grid(grid)
+        axes = np.array([ax])
+
+        # error: Argument 1 to "set_ticks_props" has incompatible type "ndarray[Any,
+        # dtype[Any]]"; expected "Axes | Sequence[Axes]"
+        set_ticks_props(
+            axes,  # type: ignore[arg-type]
+            xlabelsize=xlabelsize,
+            xrot=xrot,
+            ylabelsize=ylabelsize,
+            yrot=yrot,
+        )
+
+ else: + if "figure" in kwds: + raise ValueError( + "Cannot pass 'figure' when using the " + "'by' argument, since a new 'Figure' instance will be created" + ) + axes = _grouped_hist( + self, + by=by, + ax=ax, + grid=grid, + figsize=figsize, + bins=bins, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + legend=legend, + **kwds, + ) + + if hasattr(axes, "ndim"): + if axes.ndim == 1 and len(axes) == 1: + return axes[0] + return axes + + +def hist_frame( + data: DataFrame, + column=None, + by=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot=None, + ylabelsize: int | None = None, + yrot=None, + ax=None, + sharex: bool = False, + sharey: bool = False, + figsize: tuple[float, float] | None = None, + layout=None, + bins: int = 10, + legend: bool = False, + **kwds, +): + if legend and "label" in kwds: + raise ValueError("Cannot use both legend and label") + if by is not None: + axes = _grouped_hist( + data, + column=column, + by=by, + ax=ax, + grid=grid, + figsize=figsize, + sharex=sharex, + sharey=sharey, + layout=layout, + bins=bins, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + legend=legend, + **kwds, + ) + return axes + + if column is not None: + if not isinstance(column, (list, np.ndarray, ABCIndex)): + column = [column] + data = data[column] + # GH32590 + data = data.select_dtypes( + include=(np.number, "datetime64", "datetimetz"), exclude="timedelta" + ) + naxes = len(data.columns) + + if naxes == 0: + raise ValueError( + "hist method requires numerical or datetime columns, nothing to plot." + ) + + fig, axes = create_subplots( + naxes=naxes, + ax=ax, + squeeze=False, + sharex=sharex, + sharey=sharey, + figsize=figsize, + layout=layout, + ) + _axes = flatten_axes(axes) + + can_set_label = "label" not in kwds + + for i, col in enumerate(data.columns): + ax = _axes[i] + if legend and can_set_label: + kwds["label"] = col + ax.hist(data[col].dropna().values, bins=bins, **kwds) + ax.set_title(col) + ax.grid(grid) + if legend: + ax.legend() + + set_ticks_props( + axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot + ) + maybe_adjust_figure(fig, wspace=0.3, hspace=0.3) + + return axes diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/misc.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..1f9212587e05e2e3689b680ff01ae7780230657e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/misc.py @@ -0,0 +1,481 @@ +from __future__ import annotations + +import random +from typing import TYPE_CHECKING + +from matplotlib import patches +import matplotlib.lines as mlines +import numpy as np + +from pandas.core.dtypes.missing import notna + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.style import get_standard_colors +from pandas.plotting._matplotlib.tools import ( + create_subplots, + do_adjust_figure, + maybe_adjust_figure, + set_ticks_props, +) + +if TYPE_CHECKING: + from collections.abc import Hashable + + from matplotlib.axes import Axes + from matplotlib.figure import Figure + + from pandas import ( + DataFrame, + Index, + Series, + ) + + +def scatter_matrix( + frame: DataFrame, + alpha: float = 0.5, + figsize: tuple[float, float] | None = None, + ax=None, + grid: bool = False, + diagonal: str = "hist", + marker: str = ".", + density_kwds=None, + hist_kwds=None, + range_padding: float = 0.05, + **kwds, +): + df = 
frame._get_numeric_data() + n = df.columns.size + naxes = n * n + fig, axes = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) + + # no gaps between subplots + maybe_adjust_figure(fig, wspace=0, hspace=0) + + mask = notna(df) + + marker = _get_marker_compat(marker) + + hist_kwds = hist_kwds or {} + density_kwds = density_kwds or {} + + # GH 14855 + kwds.setdefault("edgecolors", "none") + + boundaries_list = [] + for a in df.columns: + values = df[a].values[mask[a].values] + rmin_, rmax_ = np.min(values), np.max(values) + rdelta_ext = (rmax_ - rmin_) * range_padding / 2 + boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext)) + + for i, a in enumerate(df.columns): + for j, b in enumerate(df.columns): + ax = axes[i, j] + + if i == j: + values = df[a].values[mask[a].values] + + # Deal with the diagonal by drawing a histogram there. + if diagonal == "hist": + ax.hist(values, **hist_kwds) + + elif diagonal in ("kde", "density"): + from scipy.stats import gaussian_kde + + y = values + gkde = gaussian_kde(y) + ind = np.linspace(y.min(), y.max(), 1000) + ax.plot(ind, gkde.evaluate(ind), **density_kwds) + + ax.set_xlim(boundaries_list[i]) + + else: + common = (mask[a] & mask[b]).values + + ax.scatter( + df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds + ) + + ax.set_xlim(boundaries_list[j]) + ax.set_ylim(boundaries_list[i]) + + ax.set_xlabel(b) + ax.set_ylabel(a) + + if j != 0: + ax.yaxis.set_visible(False) + if i != n - 1: + ax.xaxis.set_visible(False) + + if len(df.columns) > 1: + lim1 = boundaries_list[0] + locs = axes[0][1].yaxis.get_majorticklocs() + locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])] + adj = (locs - lim1[0]) / (lim1[1] - lim1[0]) + + lim0 = axes[0][0].get_ylim() + adj = adj * (lim0[1] - lim0[0]) + lim0[0] + axes[0][0].yaxis.set_ticks(adj) + + if np.all(locs == locs.astype(int)): + # if all ticks are int + locs = locs.astype(int) + axes[0][0].yaxis.set_ticklabels(locs) + + set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) + + return axes + + +def _get_marker_compat(marker): + if marker not in mlines.lineMarkers: + return "o" + return marker + + +def radviz( + frame: DataFrame, + class_column, + ax: Axes | None = None, + color=None, + colormap=None, + **kwds, +) -> Axes: + import matplotlib.pyplot as plt + + def normalize(series): + a = min(series) + b = max(series) + return (series - a) / (b - a) + + n = len(frame) + classes = frame[class_column].drop_duplicates() + class_col = frame[class_column] + df = frame.drop(class_column, axis=1).apply(normalize) + + if ax is None: + ax = plt.gca() + ax.set_xlim(-1, 1) + ax.set_ylim(-1, 1) + + to_plot: dict[Hashable, list[list]] = {} + colors = get_standard_colors( + num_colors=len(classes), colormap=colormap, color_type="random", color=color + ) + + for kls in classes: + to_plot[kls] = [[], []] + + m = len(frame.columns) - 1 + s = np.array( + [(np.cos(t), np.sin(t)) for t in [2 * np.pi * (i / m) for i in range(m)]] + ) + + for i in range(n): + row = df.iloc[i].values + row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1) + y = (s * row_).sum(axis=0) / row.sum() + kls = class_col.iat[i] + to_plot[kls][0].append(y[0]) + to_plot[kls][1].append(y[1]) + + for i, kls in enumerate(classes): + ax.scatter( + to_plot[kls][0], + to_plot[kls][1], + color=colors[i], + label=pprint_thing(kls), + **kwds, + ) + ax.legend() + + ax.add_patch(patches.Circle((0.0, 0.0), radius=1.0, facecolor="none")) + + for xy, name in zip(s, df.columns): + ax.add_patch(patches.Circle(xy, radius=0.025, 
facecolor="gray")) + + if xy[0] < 0.0 and xy[1] < 0.0: + ax.text( + xy[0] - 0.025, xy[1] - 0.025, name, ha="right", va="top", size="small" + ) + elif xy[0] < 0.0 <= xy[1]: + ax.text( + xy[0] - 0.025, + xy[1] + 0.025, + name, + ha="right", + va="bottom", + size="small", + ) + elif xy[1] < 0.0 <= xy[0]: + ax.text( + xy[0] + 0.025, xy[1] - 0.025, name, ha="left", va="top", size="small" + ) + elif xy[0] >= 0.0 and xy[1] >= 0.0: + ax.text( + xy[0] + 0.025, xy[1] + 0.025, name, ha="left", va="bottom", size="small" + ) + + ax.axis("equal") + return ax + + +def andrews_curves( + frame: DataFrame, + class_column, + ax: Axes | None = None, + samples: int = 200, + color=None, + colormap=None, + **kwds, +) -> Axes: + import matplotlib.pyplot as plt + + def function(amplitudes): + def f(t): + x1 = amplitudes[0] + result = x1 / np.sqrt(2.0) + + # Take the rest of the coefficients and resize them + # appropriately. Take a copy of amplitudes as otherwise numpy + # deletes the element from amplitudes itself. + coeffs = np.delete(np.copy(amplitudes), 0) + coeffs = np.resize(coeffs, (int((coeffs.size + 1) / 2), 2)) + + # Generate the harmonics and arguments for the sin and cos + # functions. + harmonics = np.arange(0, coeffs.shape[0]) + 1 + trig_args = np.outer(harmonics, t) + + result += np.sum( + coeffs[:, 0, np.newaxis] * np.sin(trig_args) + + coeffs[:, 1, np.newaxis] * np.cos(trig_args), + axis=0, + ) + return result + + return f + + n = len(frame) + class_col = frame[class_column] + classes = frame[class_column].drop_duplicates() + df = frame.drop(class_column, axis=1) + t = np.linspace(-np.pi, np.pi, samples) + used_legends: set[str] = set() + + color_values = get_standard_colors( + num_colors=len(classes), colormap=colormap, color_type="random", color=color + ) + colors = dict(zip(classes, color_values)) + if ax is None: + ax = plt.gca() + ax.set_xlim(-np.pi, np.pi) + for i in range(n): + row = df.iloc[i].values + f = function(row) + y = f(t) + kls = class_col.iat[i] + label = pprint_thing(kls) + if label not in used_legends: + used_legends.add(label) + ax.plot(t, y, color=colors[kls], label=label, **kwds) + else: + ax.plot(t, y, color=colors[kls], **kwds) + + ax.legend(loc="upper right") + ax.grid() + return ax + + +def bootstrap_plot( + series: Series, + fig: Figure | None = None, + size: int = 50, + samples: int = 500, + **kwds, +) -> Figure: + import matplotlib.pyplot as plt + + # TODO: is the failure mentioned below still relevant? 
+ # random.sample(ndarray, int) fails on python 3.3, sigh + data = list(series.values) + samplings = [random.sample(data, size) for _ in range(samples)] + + means = np.array([np.mean(sampling) for sampling in samplings]) + medians = np.array([np.median(sampling) for sampling in samplings]) + midranges = np.array( + [(min(sampling) + max(sampling)) * 0.5 for sampling in samplings] + ) + if fig is None: + fig = plt.figure() + x = list(range(samples)) + axes = [] + ax1 = fig.add_subplot(2, 3, 1) + ax1.set_xlabel("Sample") + axes.append(ax1) + ax1.plot(x, means, **kwds) + ax2 = fig.add_subplot(2, 3, 2) + ax2.set_xlabel("Sample") + axes.append(ax2) + ax2.plot(x, medians, **kwds) + ax3 = fig.add_subplot(2, 3, 3) + ax3.set_xlabel("Sample") + axes.append(ax3) + ax3.plot(x, midranges, **kwds) + ax4 = fig.add_subplot(2, 3, 4) + ax4.set_xlabel("Mean") + axes.append(ax4) + ax4.hist(means, **kwds) + ax5 = fig.add_subplot(2, 3, 5) + ax5.set_xlabel("Median") + axes.append(ax5) + ax5.hist(medians, **kwds) + ax6 = fig.add_subplot(2, 3, 6) + ax6.set_xlabel("Midrange") + axes.append(ax6) + ax6.hist(midranges, **kwds) + for axis in axes: + plt.setp(axis.get_xticklabels(), fontsize=8) + plt.setp(axis.get_yticklabels(), fontsize=8) + if do_adjust_figure(fig): + plt.tight_layout() + return fig + + +def parallel_coordinates( + frame: DataFrame, + class_column, + cols=None, + ax: Axes | None = None, + color=None, + use_columns: bool = False, + xticks=None, + colormap=None, + axvlines: bool = True, + axvlines_kwds=None, + sort_labels: bool = False, + **kwds, +) -> Axes: + import matplotlib.pyplot as plt + + if axvlines_kwds is None: + axvlines_kwds = {"linewidth": 1, "color": "black"} + + n = len(frame) + classes = frame[class_column].drop_duplicates() + class_col = frame[class_column] + + if cols is None: + df = frame.drop(class_column, axis=1) + else: + df = frame[cols] + + used_legends: set[str] = set() + + ncols = len(df.columns) + + # determine values to use for xticks + x: list[int] | Index + if use_columns is True: + if not np.all(np.isreal(list(df.columns))): + raise ValueError("Columns must be numeric to be used as xticks") + x = df.columns + elif xticks is not None: + if not np.all(np.isreal(xticks)): + raise ValueError("xticks specified must be numeric") + if len(xticks) != ncols: + raise ValueError("Length of xticks must match number of columns") + x = xticks + else: + x = list(range(ncols)) + + if ax is None: + ax = plt.gca() + + color_values = get_standard_colors( + num_colors=len(classes), colormap=colormap, color_type="random", color=color + ) + + if sort_labels: + classes = sorted(classes) + color_values = sorted(color_values) + colors = dict(zip(classes, color_values)) + + for i in range(n): + y = df.iloc[i].values + kls = class_col.iat[i] + label = pprint_thing(kls) + if label not in used_legends: + used_legends.add(label) + ax.plot(x, y, color=colors[kls], label=label, **kwds) + else: + ax.plot(x, y, color=colors[kls], **kwds) + + if axvlines: + for i in x: + ax.axvline(i, **axvlines_kwds) + + ax.set_xticks(x) + ax.set_xticklabels(df.columns) + ax.set_xlim(x[0], x[-1]) + ax.legend(loc="upper right") + ax.grid() + return ax + + +def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes: + # workaround because `c='b'` is hardcoded in matplotlib's scatter method + import matplotlib.pyplot as plt + + kwds.setdefault("c", plt.rcParams["patch.facecolor"]) + + data = series.values + y1 = data[:-lag] + y2 = data[lag:] + if ax is None: + ax = plt.gca() + ax.set_xlabel("y(t)") + 
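+    # each point is (y(t), y(t + lag)); a structureless cloud suggests little
+    # autocorrelation at this lag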
+    ax.set_ylabel(f"y(t + {lag})")
+    ax.scatter(y1, y2, **kwds)
+    return ax
+
+
+def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwds) -> Axes:
+    import matplotlib.pyplot as plt
+
+    n = len(series)
+    data = np.asarray(series)
+    if ax is None:
+        ax = plt.gca()
+        ax.set_xlim(1, n)
+        ax.set_ylim(-1.0, 1.0)
+    mean = np.mean(data)
+    c0 = np.sum((data - mean) ** 2) / n
+
+    def r(h):
+        return ((data[: n - h] - mean) * (data[h:] - mean)).sum() / n / c0
+
+    x = np.arange(n) + 1
+    y = [r(loc) for loc in x]
+    z95 = 1.959963984540054
+    z99 = 2.5758293035489004
+    ax.axhline(y=z99 / np.sqrt(n), linestyle="--", color="grey")
+    ax.axhline(y=z95 / np.sqrt(n), color="grey")
+    ax.axhline(y=0.0, color="black")
+    ax.axhline(y=-z95 / np.sqrt(n), color="grey")
+    ax.axhline(y=-z99 / np.sqrt(n), linestyle="--", color="grey")
+    ax.set_xlabel("Lag")
+    ax.set_ylabel("Autocorrelation")
+    ax.plot(x, y, **kwds)
+    if "label" in kwds:
+        ax.legend()
+    ax.grid()
+    return ax
+
+
+def unpack_single_str_list(keys):
+    # GH 42795
+    if isinstance(keys, list) and len(keys) == 1:
+        keys = keys[0]
+    return keys
diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/style.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/style.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf4e4be3bfd82e6ce89d526aa0da555f67b9f565
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/style.py
@@ -0,0 +1,278 @@
+from __future__ import annotations
+
+from collections.abc import (
+    Collection,
+    Iterator,
+)
+import itertools
+from typing import (
+    TYPE_CHECKING,
+    cast,
+)
+import warnings
+
+import matplotlib as mpl
+import matplotlib.colors
+import numpy as np
+
+from pandas._typing import MatplotlibColor as Color
+from pandas.util._exceptions import find_stack_level
+
+from pandas.core.dtypes.common import is_list_like
+
+import pandas.core.common as com
+
+if TYPE_CHECKING:
+    from matplotlib.colors import Colormap
+
+
+def get_standard_colors(
+    num_colors: int,
+    colormap: Colormap | None = None,
+    color_type: str = "default",
+    color: dict[str, Color] | Color | Collection[Color] | None = None,
+):
+    """
+    Get standard colors based on `colormap`, `color_type` or `color` inputs.
+
+    Parameters
+    ----------
+    num_colors : int
+        Minimum number of colors to be returned.
+        Ignored if `color` is a dictionary.
+    colormap : :py:class:`matplotlib.colors.Colormap`, optional
+        Matplotlib colormap.
+        When provided, the resulting colors will be derived from the colormap.
+    color_type : {"default", "random"}, optional
+        Type of colors to derive. Used if provided `color` and `colormap` are None.
+        Ignored if either `color` or `colormap` are not None.
+    color : dict or str or sequence, optional
+        Color(s) to be used for deriving sequence of colors.
+        Can either be a dictionary, or a single color (single color string,
+        or sequence of floats representing a single color),
+        or a sequence of colors.
+
+    Returns
+    -------
+    dict or list
+        Standard colors. Can either be a mapping if `color` was a dictionary,
+        or a list of colors with a length of `num_colors` or more.
+
+    Warns
+    -----
+    UserWarning
+        If both `colormap` and `color` are provided.
+        Parameter `color` will override.
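+
+    Examples
+    --------
+    Illustrative sketch of the dispatch implemented below: a dict passes
+    through unchanged, and a single color is cycled up to `num_colors`.
+
+    >>> get_standard_colors(num_colors=3, color="red")
+    ['red', 'red', 'red']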
+ """ + if isinstance(color, dict): + return color + + colors = _derive_colors( + color=color, + colormap=colormap, + color_type=color_type, + num_colors=num_colors, + ) + + return list(_cycle_colors(colors, num_colors=num_colors)) + + +def _derive_colors( + *, + color: Color | Collection[Color] | None, + colormap: str | Colormap | None, + color_type: str, + num_colors: int, +) -> list[Color]: + """ + Derive colors from either `colormap`, `color_type` or `color` inputs. + + Get a list of colors either from `colormap`, or from `color`, + or from `color_type` (if both `colormap` and `color` are None). + + Parameters + ---------- + color : str or sequence, optional + Color(s) to be used for deriving sequence of colors. + Can be either be a single color (single color string, or sequence of floats + representing a single color), or a sequence of colors. + colormap : :py:class:`matplotlib.colors.Colormap`, optional + Matplotlib colormap. + When provided, the resulting colors will be derived from the colormap. + color_type : {"default", "random"}, optional + Type of colors to derive. Used if provided `color` and `colormap` are None. + Ignored if either `color` or `colormap`` are not None. + num_colors : int + Number of colors to be extracted. + + Returns + ------- + list + List of colors extracted. + + Warns + ----- + UserWarning + If both `colormap` and `color` are provided. + Parameter `color` will override. + """ + if color is None and colormap is not None: + return _get_colors_from_colormap(colormap, num_colors=num_colors) + elif color is not None: + if colormap is not None: + warnings.warn( + "'color' and 'colormap' cannot be used simultaneously. Using 'color'", + stacklevel=find_stack_level(), + ) + return _get_colors_from_color(color) + else: + return _get_colors_from_color_type(color_type, num_colors=num_colors) + + +def _cycle_colors(colors: list[Color], num_colors: int) -> Iterator[Color]: + """Cycle colors until achieving max of `num_colors` or length of `colors`. + + Extra colors will be ignored by matplotlib if there are more colors + than needed and nothing needs to be done here. + """ + max_colors = max(num_colors, len(colors)) + yield from itertools.islice(itertools.cycle(colors), max_colors) + + +def _get_colors_from_colormap( + colormap: str | Colormap, + num_colors: int, +) -> list[Color]: + """Get colors from colormap.""" + cmap = _get_cmap_instance(colormap) + return [cmap(num) for num in np.linspace(0, 1, num=num_colors)] + + +def _get_cmap_instance(colormap: str | Colormap) -> Colormap: + """Get instance of matplotlib colormap.""" + if isinstance(colormap, str): + cmap = colormap + colormap = mpl.colormaps[colormap] + if colormap is None: + raise ValueError(f"Colormap {cmap} is not recognized") + return colormap + + +def _get_colors_from_color( + color: Color | Collection[Color], +) -> list[Color]: + """Get colors from user input color.""" + if len(color) == 0: + raise ValueError(f"Invalid color argument: {color}") + + if _is_single_color(color): + color = cast(Color, color) + return [color] + + color = cast(Collection[Color], color) + return list(_gen_list_of_colors_from_iterable(color)) + + +def _is_single_color(color: Color | Collection[Color]) -> bool: + """Check if `color` is a single color, not a sequence of colors. + + Single color is of these kinds: + - Named color "red", "C0", "firebrick" + - Alias "g" + - Sequence of floats, such as (0.1, 0.2, 0.3) or (0.1, 0.2, 0.3, 0.4). 
+ + See Also + -------- + _is_single_string_color + """ + if isinstance(color, str) and _is_single_string_color(color): + # GH #36972 + return True + + if _is_floats_color(color): + return True + + return False + + +def _gen_list_of_colors_from_iterable(color: Collection[Color]) -> Iterator[Color]: + """ + Yield colors from string of several letters or from collection of colors. + """ + for x in color: + if _is_single_color(x): + yield x + else: + raise ValueError(f"Invalid color {x}") + + +def _is_floats_color(color: Color | Collection[Color]) -> bool: + """Check if color comprises a sequence of floats representing color.""" + return bool( + is_list_like(color) + and (len(color) == 3 or len(color) == 4) + and all(isinstance(x, (int, float)) for x in color) + ) + + +def _get_colors_from_color_type(color_type: str, num_colors: int) -> list[Color]: + """Get colors from user input color type.""" + if color_type == "default": + return _get_default_colors(num_colors) + elif color_type == "random": + return _get_random_colors(num_colors) + else: + raise ValueError("color_type must be either 'default' or 'random'") + + +def _get_default_colors(num_colors: int) -> list[Color]: + """Get `num_colors` of default colors from matplotlib rc params.""" + import matplotlib.pyplot as plt + + colors = [c["color"] for c in plt.rcParams["axes.prop_cycle"]] + return colors[0:num_colors] + + +def _get_random_colors(num_colors: int) -> list[Color]: + """Get `num_colors` of random colors.""" + return [_random_color(num) for num in range(num_colors)] + + +def _random_color(column: int) -> list[float]: + """Get a random color represented as a list of length 3""" + # GH17525 use common._random_state to avoid resetting the seed + rs = com.random_state(column) + return rs.rand(3).tolist() + + +def _is_single_string_color(color: Color) -> bool: + """Check if `color` is a single string color. + + Examples of single string colors: + - 'r' + - 'g' + - 'red' + - 'green' + - 'C3' + - 'firebrick' + + Parameters + ---------- + color : Color + Color string or sequence of floats. + + Returns + ------- + bool + True if `color` looks like a valid color. + False otherwise. + """ + conv = matplotlib.colors.ColorConverter() + try: + # error: Argument 1 to "to_rgba" of "ColorConverter" has incompatible type + # "str | Sequence[float]"; expected "tuple[float, float, float] | ..." 
+ conv.to_rgba(color) # type: ignore[arg-type] + except ValueError: + return False + else: + return True diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/timeseries.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/timeseries.py new file mode 100644 index 0000000000000000000000000000000000000000..c7ddfa55d0417f8c8fa4addd82faacedf90394d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/timeseries.py @@ -0,0 +1,370 @@ +# TODO: Use the fact that axis can have units to simplify the process + +from __future__ import annotations + +import functools +from typing import ( + TYPE_CHECKING, + Any, + cast, +) +import warnings + +import numpy as np + +from pandas._libs.tslibs import ( + BaseOffset, + Period, + to_offset, +) +from pandas._libs.tslibs.dtypes import ( + OFFSET_TO_PERIOD_FREQSTR, + FreqGroup, +) + +from pandas.core.dtypes.generic import ( + ABCDatetimeIndex, + ABCPeriodIndex, + ABCTimedeltaIndex, +) + +from pandas.io.formats.printing import pprint_thing +from pandas.plotting._matplotlib.converter import ( + TimeSeries_DateFormatter, + TimeSeries_DateLocator, + TimeSeries_TimedeltaFormatter, +) +from pandas.tseries.frequencies import ( + get_period_alias, + is_subperiod, + is_superperiod, +) + +if TYPE_CHECKING: + from datetime import timedelta + + from matplotlib.axes import Axes + + from pandas._typing import NDFrameT + + from pandas import ( + DataFrame, + DatetimeIndex, + Index, + PeriodIndex, + Series, + ) + +# --------------------------------------------------------------------- +# Plotting functions and monkey patches + + +def maybe_resample(series: Series, ax: Axes, kwargs: dict[str, Any]): + # resample against axes freq if necessary + + if "how" in kwargs: + raise ValueError( + "'how' is not a valid keyword for plotting functions. If plotting " + "multiple objects on shared axes, resample manually first." + ) + + freq, ax_freq = _get_freq(ax, series) + + if freq is None: # pragma: no cover + raise ValueError("Cannot use dynamic axis without frequency info") + + # Convert DatetimeIndex to PeriodIndex + if isinstance(series.index, ABCDatetimeIndex): + series = series.to_period(freq=freq) + + if ax_freq is not None and freq != ax_freq: + if is_superperiod(freq, ax_freq): # upsample input + series = series.copy() + # error: "Index" has no attribute "asfreq" + series.index = series.index.asfreq( # type: ignore[attr-defined] + ax_freq, how="s" + ) + freq = ax_freq + elif _is_sup(freq, ax_freq): # one is weekly + # Resampling with PeriodDtype is deprecated, so we convert to + # DatetimeIndex, resample, then convert back. 
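+            # e.g. a weekly series plotted on monthly axes: W -> timestamps
+            # -> daily last() -> monthly last() -> back to Period[M]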
+ ser_ts = series.to_timestamp() + ser_d = ser_ts.resample("D").last().dropna() + ser_freq = ser_d.resample(ax_freq).last().dropna() + series = ser_freq.to_period(ax_freq) + freq = ax_freq + elif is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): + _upsample_others(ax, freq, kwargs) + else: # pragma: no cover + raise ValueError("Incompatible frequency conversion") + return freq, series + + +def _is_sub(f1: str, f2: str) -> bool: + return (f1.startswith("W") and is_subperiod("D", f2)) or ( + f2.startswith("W") and is_subperiod(f1, "D") + ) + + +def _is_sup(f1: str, f2: str) -> bool: + return (f1.startswith("W") and is_superperiod("D", f2)) or ( + f2.startswith("W") and is_superperiod(f1, "D") + ) + + +def _upsample_others(ax: Axes, freq: BaseOffset, kwargs: dict[str, Any]) -> None: + legend = ax.get_legend() + lines, labels = _replot_ax(ax, freq) + _replot_ax(ax, freq) + + other_ax = None + if hasattr(ax, "left_ax"): + other_ax = ax.left_ax + if hasattr(ax, "right_ax"): + other_ax = ax.right_ax + + if other_ax is not None: + rlines, rlabels = _replot_ax(other_ax, freq) + lines.extend(rlines) + labels.extend(rlabels) + + if legend is not None and kwargs.get("legend", True) and len(lines) > 0: + title: str | None = legend.get_title().get_text() + if title == "None": + title = None + ax.legend(lines, labels, loc="best", title=title) + + +def _replot_ax(ax: Axes, freq: BaseOffset): + data = getattr(ax, "_plot_data", None) + + # clear current axes and data + # TODO #54485 + ax._plot_data = [] # type: ignore[attr-defined] + ax.clear() + + decorate_axes(ax, freq) + + lines = [] + labels = [] + if data is not None: + for series, plotf, kwds in data: + series = series.copy() + idx = series.index.asfreq(freq, how="S") + series.index = idx + # TODO #54485 + ax._plot_data.append((series, plotf, kwds)) # type: ignore[attr-defined] + + # for tsplot + if isinstance(plotf, str): + from pandas.plotting._matplotlib import PLOT_CLASSES + + plotf = PLOT_CLASSES[plotf]._plot + + lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0]) + labels.append(pprint_thing(series.name)) + + return lines, labels + + +def decorate_axes(ax: Axes, freq: BaseOffset) -> None: + """Initialize axes for time-series plotting""" + if not hasattr(ax, "_plot_data"): + # TODO #54485 + ax._plot_data = [] # type: ignore[attr-defined] + + # TODO #54485 + ax.freq = freq # type: ignore[attr-defined] + xaxis = ax.get_xaxis() + # TODO #54485 + xaxis.freq = freq # type: ignore[attr-defined] + + +def _get_ax_freq(ax: Axes): + """ + Get the freq attribute of the ax object if set. 
+ Also checks shared axes (eg when using secondary yaxis, sharex=True + or twinx) + """ + ax_freq = getattr(ax, "freq", None) + if ax_freq is None: + # check for left/right ax in case of secondary yaxis + if hasattr(ax, "left_ax"): + ax_freq = getattr(ax.left_ax, "freq", None) + elif hasattr(ax, "right_ax"): + ax_freq = getattr(ax.right_ax, "freq", None) + if ax_freq is None: + # check if a shared ax (sharex/twinx) has already freq set + shared_axes = ax.get_shared_x_axes().get_siblings(ax) + if len(shared_axes) > 1: + for shared_ax in shared_axes: + ax_freq = getattr(shared_ax, "freq", None) + if ax_freq is not None: + break + return ax_freq + + +def _get_period_alias(freq: timedelta | BaseOffset | str) -> str | None: + if isinstance(freq, BaseOffset): + freqstr = freq.name + else: + freqstr = to_offset(freq, is_period=True).rule_code + + return get_period_alias(freqstr) + + +def _get_freq(ax: Axes, series: Series): + # get frequency from data + freq = getattr(series.index, "freq", None) + if freq is None: + freq = getattr(series.index, "inferred_freq", None) + freq = to_offset(freq, is_period=True) + + ax_freq = _get_ax_freq(ax) + + # use axes freq if no data freq + if freq is None: + freq = ax_freq + + # get the period frequency + freq = _get_period_alias(freq) + return freq, ax_freq + + +def use_dynamic_x(ax: Axes, data: DataFrame | Series) -> bool: + freq = _get_index_freq(data.index) + ax_freq = _get_ax_freq(ax) + + if freq is None: # convert irregular if axes has freq info + freq = ax_freq + # do not use tsplot if irregular was plotted first + elif (ax_freq is None) and (len(ax.get_lines()) > 0): + return False + + if freq is None: + return False + + freq_str = _get_period_alias(freq) + + if freq_str is None: + return False + + # FIXME: hack this for 0.10.1, creating more technical debt...sigh + if isinstance(data.index, ABCDatetimeIndex): + # error: "BaseOffset" has no attribute "_period_dtype_code" + freq_str = OFFSET_TO_PERIOD_FREQSTR.get(freq_str, freq_str) + base = to_offset( + freq_str, is_period=True + )._period_dtype_code # type: ignore[attr-defined] + x = data.index + if base <= FreqGroup.FR_DAY.value: + return x[:1].is_normalized + period = Period(x[0], freq_str) + assert isinstance(period, Period) + return period.to_timestamp().tz_localize(x.tz) == x[0] + return True + + +def _get_index_freq(index: Index) -> BaseOffset | None: + freq = getattr(index, "freq", None) + if freq is None: + freq = getattr(index, "inferred_freq", None) + if freq == "B": + # error: "Index" has no attribute "dayofweek" + weekdays = np.unique(index.dayofweek) # type: ignore[attr-defined] + if (5 in weekdays) or (6 in weekdays): + freq = None + + freq = to_offset(freq) + return freq + + +def maybe_convert_index(ax: Axes, data: NDFrameT) -> NDFrameT: + # tsplot converts automatically, but don't want to convert index + # over and over for DataFrames + if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)): + freq: str | BaseOffset | None = data.index.freq + + if freq is None: + # We only get here for DatetimeIndex + data.index = cast("DatetimeIndex", data.index) + freq = data.index.inferred_freq + freq = to_offset(freq) + + if freq is None: + freq = _get_ax_freq(ax) + + if freq is None: + raise ValueError("Could not get frequency alias for plotting") + + freq_str = _get_period_alias(freq) + + with warnings.catch_warnings(): + # suppress Period[B] deprecation warning + # TODO: need to find an alternative to this before the deprecation + # is enforced! 
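+            # the message regex below is deliberately narrow, so other
+            # FutureWarnings raised during the conversion still surface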
+ warnings.filterwarnings( + "ignore", + r"PeriodDtype\[B\] is deprecated", + category=FutureWarning, + ) + + if isinstance(data.index, ABCDatetimeIndex): + data = data.tz_localize(None).to_period(freq=freq_str) + elif isinstance(data.index, ABCPeriodIndex): + data.index = data.index.asfreq(freq=freq_str) + return data + + +# Patch methods for subplot. + + +def _format_coord(freq, t, y) -> str: + time_period = Period(ordinal=int(t), freq=freq) + return f"t = {time_period} y = {y:8f}" + + +def format_dateaxis( + subplot, freq: BaseOffset, index: DatetimeIndex | PeriodIndex +) -> None: + """ + Pretty-formats the date axis (x-axis). + + Major and minor ticks are automatically set for the frequency of the + current underlying series. As the dynamic mode is activated by + default, changing the limits of the x axis will intelligently change + the positions of the ticks. + """ + from matplotlib import pylab + + # handle index specific formatting + # Note: DatetimeIndex does not use this + # interface. DatetimeIndex uses matplotlib.date directly + if isinstance(index, ABCPeriodIndex): + majlocator = TimeSeries_DateLocator( + freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot + ) + minlocator = TimeSeries_DateLocator( + freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot + ) + subplot.xaxis.set_major_locator(majlocator) + subplot.xaxis.set_minor_locator(minlocator) + + majformatter = TimeSeries_DateFormatter( + freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot + ) + minformatter = TimeSeries_DateFormatter( + freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot + ) + subplot.xaxis.set_major_formatter(majformatter) + subplot.xaxis.set_minor_formatter(minformatter) + + # x and y coord info + subplot.format_coord = functools.partial(_format_coord, freq) + + elif isinstance(index, ABCTimedeltaIndex): + subplot.xaxis.set_major_formatter(TimeSeries_TimedeltaFormatter()) + else: + raise TypeError("index type not supported") + + pylab.draw_if_interactive() diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/tools.py b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/tools.py new file mode 100644 index 0000000000000000000000000000000000000000..898b5b25e7b0171e4b41106bf340e7fbc3bbb735 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_matplotlib/tools.py @@ -0,0 +1,492 @@ +# being a bit too dynamic +from __future__ import annotations + +from math import ceil +from typing import TYPE_CHECKING +import warnings + +from matplotlib import ticker +import matplotlib.table +import numpy as np + +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from collections.abc import ( + Iterable, + Sequence, + ) + + from matplotlib.axes import Axes + from matplotlib.axis import Axis + from matplotlib.figure import Figure + from matplotlib.lines import Line2D + from matplotlib.table import Table + + from pandas import ( + DataFrame, + Series, + ) + + +def do_adjust_figure(fig: Figure) -> bool: + """Whether fig has constrained_layout enabled.""" + if not hasattr(fig, "get_constrained_layout"): + return False + return not fig.get_constrained_layout() + + +def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: + """Call fig.subplots_adjust unless fig has constrained_layout enabled.""" + if do_adjust_figure(fig): + fig.subplots_adjust(*args, **kwargs) + + +def 
format_date_labels(ax: Axes, rot) -> None: + # mini version of autofmt_xdate + for label in ax.get_xticklabels(): + label.set_horizontalalignment("right") + label.set_rotation(rot) + fig = ax.get_figure() + if fig is not None: + # should always be a Figure but can technically be None + maybe_adjust_figure(fig, bottom=0.2) + + +def table( + ax, data: DataFrame | Series, rowLabels=None, colLabels=None, **kwargs +) -> Table: + if isinstance(data, ABCSeries): + data = data.to_frame() + elif isinstance(data, ABCDataFrame): + pass + else: + raise ValueError("Input data must be DataFrame or Series") + + if rowLabels is None: + rowLabels = data.index + + if colLabels is None: + colLabels = data.columns + + cellText = data.values + + # error: Argument "cellText" to "table" has incompatible type "ndarray[Any, + # Any]"; expected "Sequence[Sequence[str]] | None" + return matplotlib.table.table( + ax, + cellText=cellText, # type: ignore[arg-type] + rowLabels=rowLabels, + colLabels=colLabels, + **kwargs, + ) + + +def _get_layout( + nplots: int, + layout: tuple[int, int] | None = None, + layout_type: str = "box", +) -> tuple[int, int]: + if layout is not None: + if not isinstance(layout, (tuple, list)) or len(layout) != 2: + raise ValueError("Layout must be a tuple of (rows, columns)") + + nrows, ncols = layout + + if nrows == -1 and ncols > 0: + layout = nrows, ncols = (ceil(nplots / ncols), ncols) + elif ncols == -1 and nrows > 0: + layout = nrows, ncols = (nrows, ceil(nplots / nrows)) + elif ncols <= 0 and nrows <= 0: + msg = "At least one dimension of layout must be positive" + raise ValueError(msg) + + if nrows * ncols < nplots: + raise ValueError( + f"Layout of {nrows}x{ncols} must be larger than required size {nplots}" + ) + + return layout + + if layout_type == "single": + return (1, 1) + elif layout_type == "horizontal": + return (1, nplots) + elif layout_type == "vertical": + return (nplots, 1) + + layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)} + try: + return layouts[nplots] + except KeyError: + k = 1 + while k**2 < nplots: + k += 1 + + if (k - 1) * k >= nplots: + return k, (k - 1) + else: + return k, k + + +# copied from matplotlib/pyplot.py and modified for pandas.plotting + + +def create_subplots( + naxes: int, + sharex: bool = False, + sharey: bool = False, + squeeze: bool = True, + subplot_kw=None, + ax=None, + layout=None, + layout_type: str = "box", + **fig_kw, +): + """ + Create a figure with a set of subplots already made. + + This utility wrapper makes it convenient to create common layouts of + subplots, including the enclosing figure object, in a single call. + + Parameters + ---------- + naxes : int + Number of required axes. Exceeded axes are set invisible. Default is + nrows * ncols. + + sharex : bool + If True, the X axis will be shared amongst all subplots. + + sharey : bool + If True, the Y axis will be shared amongst all subplots. + + squeeze : bool + + If True, extra dimensions are squeezed out from the returned axis object: + - if only one subplot is constructed (nrows=ncols=1), the resulting + single Axis object is returned as a scalar. + - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object + array of Axis objects are returned as numpy 1-d arrays. + - for NxM subplots with N>1 and M>1 are returned as a 2d array. + + If False, no squeezing is done: the returned axis object is always + a 2-d array containing Axis instances, even if it ends up being 1x1. 
+ + subplot_kw : dict + Dict with keywords passed to the add_subplot() call used to create each + subplots. + + ax : Matplotlib axis object, optional + + layout : tuple + Number of rows and columns of the subplot grid. + If not specified, calculated from naxes and layout_type + + layout_type : {'box', 'horizontal', 'vertical'}, default 'box' + Specify how to layout the subplot grid. + + fig_kw : Other keyword arguments to be passed to the figure() call. + Note that all keywords not recognized above will be + automatically included here. + + Returns + ------- + fig, ax : tuple + - fig is the Matplotlib Figure object + - ax can be either a single axis object or an array of axis objects if + more than one subplot was created. The dimensions of the resulting array + can be controlled with the squeeze keyword, see above. + + Examples + -------- + x = np.linspace(0, 2*np.pi, 400) + y = np.sin(x**2) + + # Just a figure and one subplot + f, ax = plt.subplots() + ax.plot(x, y) + ax.set_title('Simple plot') + + # Two subplots, unpack the output array immediately + f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) + ax1.plot(x, y) + ax1.set_title('Sharing Y axis') + ax2.scatter(x, y) + + # Four polar axes + plt.subplots(2, 2, subplot_kw=dict(polar=True)) + """ + import matplotlib.pyplot as plt + + if subplot_kw is None: + subplot_kw = {} + + if ax is None: + fig = plt.figure(**fig_kw) + else: + if is_list_like(ax): + if squeeze: + ax = flatten_axes(ax) + if layout is not None: + warnings.warn( + "When passing multiple axes, layout keyword is ignored.", + UserWarning, + stacklevel=find_stack_level(), + ) + if sharex or sharey: + warnings.warn( + "When passing multiple axes, sharex and sharey " + "are ignored. These settings must be specified when creating axes.", + UserWarning, + stacklevel=find_stack_level(), + ) + if ax.size == naxes: + fig = ax.flat[0].get_figure() + return fig, ax + else: + raise ValueError( + f"The number of passed axes must be {naxes}, the " + "same as the output plot" + ) + + fig = ax.get_figure() + # if ax is passed and a number of subplots is 1, return ax as it is + if naxes == 1: + if squeeze: + return fig, ax + else: + return fig, flatten_axes(ax) + else: + warnings.warn( + "To output multiple subplots, the figure containing " + "the passed axes is being cleared.", + UserWarning, + stacklevel=find_stack_level(), + ) + fig.clear() + + nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) + nplots = nrows * ncols + + # Create empty object array to hold all axes. It's easiest to make it 1-d + # so we can just append subplots upon creation, and then + axarr = np.empty(nplots, dtype=object) + + # Create first subplot separately, so we can share it if requested + ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) + + if sharex: + subplot_kw["sharex"] = ax0 + if sharey: + subplot_kw["sharey"] = ax0 + axarr[0] = ax0 + + # Note off-by-one counting because add_subplot uses the MATLAB 1-based + # convention. + for i in range(1, nplots): + kwds = subplot_kw.copy() + # Set sharex and sharey to None for blank/dummy axes, these can + # interfere with proper axis limits on the visible axes if + # they share axes e.g. 
issue #7528 + if i >= naxes: + kwds["sharex"] = None + kwds["sharey"] = None + ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) + axarr[i] = ax + + if naxes != nplots: + for ax in axarr[naxes:]: + ax.set_visible(False) + + handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) + + if squeeze: + # Reshape the array to have the final desired dimension (nrow,ncol), + # though discarding unneeded dimensions that equal 1. If we only have + # one subplot, just return it instead of a 1-element array. + if nplots == 1: + axes = axarr[0] + else: + axes = axarr.reshape(nrows, ncols).squeeze() + else: + # returned axis array will be always 2-d, even if nrows=ncols=1 + axes = axarr.reshape(nrows, ncols) + + return fig, axes + + +def _remove_labels_from_axis(axis: Axis) -> None: + for t in axis.get_majorticklabels(): + t.set_visible(False) + + # set_visible will not be effective if + # minor axis has NullLocator and NullFormatter (default) + if isinstance(axis.get_minor_locator(), ticker.NullLocator): + axis.set_minor_locator(ticker.AutoLocator()) + if isinstance(axis.get_minor_formatter(), ticker.NullFormatter): + axis.set_minor_formatter(ticker.FormatStrFormatter("")) + for t in axis.get_minorticklabels(): + t.set_visible(False) + + axis.get_label().set_visible(False) + + +def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool: + """ + Return whether an axis is externally shared. + + Parameters + ---------- + ax1 : matplotlib.axes.Axes + Axis to query. + compare_axis : str + `"x"` or `"y"` according to whether the X-axis or Y-axis is being + compared. + + Returns + ------- + bool + `True` if the axis is externally shared. Otherwise `False`. + + Notes + ----- + If two axes with different positions are sharing an axis, they can be + referred to as *externally* sharing the common axis. + + If two axes sharing an axis also have the same position, they can be + referred to as *internally* sharing the common axis (a.k.a twinning). + + _handle_shared_axes() is only interested in axes externally sharing an + axis, regardless of whether either of the axes is also internally sharing + with a third axis. + """ + if compare_axis == "x": + axes = ax1.get_shared_x_axes() + elif compare_axis == "y": + axes = ax1.get_shared_y_axes() + else: + raise ValueError( + "_has_externally_shared_axis() needs 'x' or 'y' as a second parameter" + ) + + axes_siblings = axes.get_siblings(ax1) + + # Retain ax1 and any of its siblings which aren't in the same position as it + ax1_points = ax1.get_position().get_points() + + for ax2 in axes_siblings: + if not np.array_equal(ax1_points, ax2.get_position().get_points()): + return True + + return False + + +def handle_shared_axes( + axarr: Iterable[Axes], + nplots: int, + naxes: int, + nrows: int, + ncols: int, + sharex: bool, + sharey: bool, +) -> None: + if nplots > 1: + row_num = lambda x: x.get_subplotspec().rowspan.start + col_num = lambda x: x.get_subplotspec().colspan.start + + is_first_col = lambda x: x.get_subplotspec().is_first_col() + + if nrows > 1: + try: + # first find out the ax layout, + # so that we can correctly handle 'gaps" + layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_) + for ax in axarr: + layout[row_num(ax), col_num(ax)] = ax.get_visible() + + for ax in axarr: + # only the last row of subplots should get x labels -> all + # other off layout handles the case that the subplot is + # the last in the column, because below is no subplot/gap. 
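# In public terms, create_subplots and handle_shared_axes are what
# DataFrame.plot reaches when subplots are requested; a small usage sketch
# (matplotlib required):
import numpy as np
import pandas as pd

df = pd.DataFrame(
    np.random.default_rng(0).standard_normal((10, 3)), columns=["a", "b", "c"]
)
# layout=(-1, 2) lets the row count be inferred (a 2x2 grid whose unused
# fourth slot is set invisible); sharex=True routes through
# handle_shared_axes, which strips the now-redundant x tick labels from
# every row but the last.
axes = df.plot(subplots=True, layout=(-1, 2), sharex=True)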
+ if not layout[row_num(ax) + 1, col_num(ax)]: + continue + if sharex or _has_externally_shared_axis(ax, "x"): + _remove_labels_from_axis(ax.xaxis) + + except IndexError: + # if gridspec is used, ax.rowNum and ax.colNum may different + # from layout shape. in this case, use last_row logic + is_last_row = lambda x: x.get_subplotspec().is_last_row() + for ax in axarr: + if is_last_row(ax): + continue + if sharex or _has_externally_shared_axis(ax, "x"): + _remove_labels_from_axis(ax.xaxis) + + if ncols > 1: + for ax in axarr: + # only the first column should get y labels -> set all other to + # off as we only have labels in the first column and we always + # have a subplot there, we can skip the layout test + if is_first_col(ax): + continue + if sharey or _has_externally_shared_axis(ax, "y"): + _remove_labels_from_axis(ax.yaxis) + + +def flatten_axes(axes: Axes | Sequence[Axes]) -> np.ndarray: + if not is_list_like(axes): + return np.array([axes]) + elif isinstance(axes, (np.ndarray, ABCIndex)): + return np.asarray(axes).ravel() + return np.array(axes) + + +def set_ticks_props( + axes: Axes | Sequence[Axes], + xlabelsize: int | None = None, + xrot=None, + ylabelsize: int | None = None, + yrot=None, +): + import matplotlib.pyplot as plt + + for ax in flatten_axes(axes): + if xlabelsize is not None: + plt.setp(ax.get_xticklabels(), fontsize=xlabelsize) + if xrot is not None: + plt.setp(ax.get_xticklabels(), rotation=xrot) + if ylabelsize is not None: + plt.setp(ax.get_yticklabels(), fontsize=ylabelsize) + if yrot is not None: + plt.setp(ax.get_yticklabels(), rotation=yrot) + return axes + + +def get_all_lines(ax: Axes) -> list[Line2D]: + lines = ax.get_lines() + + if hasattr(ax, "right_ax"): + lines += ax.right_ax.get_lines() + + if hasattr(ax, "left_ax"): + lines += ax.left_ax.get_lines() + + return lines + + +def get_xlim(lines: Iterable[Line2D]) -> tuple[float, float]: + left, right = np.inf, -np.inf + for line in lines: + x = line.get_xdata(orig=False) + left = min(np.nanmin(x), left) + right = max(np.nanmax(x), right) + return left, right diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c391a872905a2b3c242b1a952cf8c8ff2c55352 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4fdc952d75116820241f51229530921d0086562 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c117d88e86bf6f811bd57adee05567e5cd148ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..b1daaa1eadf03d833db08e559294931fc210448d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_downstream.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_downstream.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cc33349c63a631bd59be6c36b19b03787ad5109 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_downstream.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_errors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_errors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee1a2e7fe4c0af764758598156e54fbb6a8b0c4d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_errors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_expressions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_expressions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1242540b004ebf8f55a7d0282801c3e8e5d4c2e9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_expressions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_flags.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_flags.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5158d48e39ab35b743037776421e424c1093adec Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_flags.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_multilevel.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_multilevel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06a5d2f573606d08c006aba53773767b42f0049c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_multilevel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e33b73f883751d1d75c971779536ac3814516491 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..577613b83d3557d017867ae3527eb2afbf858aec Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_register_accessor.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_register_accessor.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..81cd61c0a6e08057b4f71b5a5edb9891fc9dbf99 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_register_accessor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_sorting.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_sorting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c3a2e0af32f5ba81fd3614b0dd7ea9e204b9d46 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_sorting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_take.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_take.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5478f8fbaca65221d73caf083e6e990c51358f67 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/__pycache__/test_take.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/base/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b780a73be1c31c053e41d1f1007533ea4e05d223 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb2ee817ad651628fbade896acdcc2efa0789dc6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3d3a5cef3b71f7c95b499bae7b63ffa0af51ee7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_conversion.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_conversion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..305f3beb2b5094c079c8400b484d17dc6c7dc484 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_conversion.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_fillna.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_fillna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd97383116aaf2f536620cb333c9b94277b82300 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_fillna.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_misc.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a3284fe16dbfe8afef1aa051fced3508f0206ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_misc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_transpose.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_transpose.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a8c5e0d3d2db45e2bc995f7ab19dda8d0b70ff4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_transpose.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_unique.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_unique.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afd08381b03d01c16ea3777e7501e8a66dc2bebe Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_unique.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_value_counts.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_value_counts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bd9ad61fc0f5b879c6f6ceb30a82acd2997defe Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_value_counts.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/common.py b/venv/lib/python3.10/site-packages/pandas/tests/base/common.py new file mode 100644 index 0000000000000000000000000000000000000000..ad0b394105742ca5de92a03a3da2c569c38da469 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/base/common.py @@ -0,0 +1,9 @@ +from typing import Any + +from pandas import Index + + +def allow_na_ops(obj: Any) -> bool: + """Whether to skip test cases including NaN""" + is_bool_index = isinstance(obj, Index) and obj.inferred_type == "boolean" + return not is_bool_index and obj._can_hold_na diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/test_constructors.py b/venv/lib/python3.10/site-packages/pandas/tests/base/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..f3ac60f672ee1221a9b1b43faf7c2e023d4b9d3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/base/test_constructors.py @@ -0,0 +1,179 @@ +from datetime import datetime +import sys + +import numpy as np +import pytest + +from pandas.compat import PYPY + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm +from pandas.core.accessor import PandasDelegate +from pandas.core.base import ( + NoNewAttributesMixin, + PandasObject, +) + + +def series_via_frame_from_dict(x, **kwargs): + return DataFrame({"a": x}, **kwargs)["a"] + + +def series_via_frame_from_scalar(x, **kwargs): + return DataFrame(x, **kwargs)[0] + + +@pytest.fixture( + params=[ + Series, + series_via_frame_from_dict, + series_via_frame_from_scalar, + Index, + ], + ids=["Series", "DataFrame-dict", "DataFrame-array", "Index"], +) +def constructor(request): + return request.param + + +class TestPandasDelegate: + 
class Delegator: + _properties = ["prop"] + _methods = ["test_method"] + + def _set_prop(self, value): + self.prop = value + + def _get_prop(self): + return self.prop + + prop = property(_get_prop, _set_prop, doc="foo property") + + def test_method(self, *args, **kwargs): + """a test method""" + + class Delegate(PandasDelegate, PandasObject): + def __init__(self, obj) -> None: + self.obj = obj + + def test_invalid_delegation(self): + # these show that in order for the delegation to work + # the _delegate_* methods need to be overridden to not raise + # a TypeError + + self.Delegate._add_delegate_accessors( + delegate=self.Delegator, + accessors=self.Delegator._properties, + typ="property", + ) + self.Delegate._add_delegate_accessors( + delegate=self.Delegator, accessors=self.Delegator._methods, typ="method" + ) + + delegate = self.Delegate(self.Delegator()) + + msg = "You cannot access the property prop" + with pytest.raises(TypeError, match=msg): + delegate.prop + + msg = "The property prop cannot be set" + with pytest.raises(TypeError, match=msg): + delegate.prop = 5 + + msg = "You cannot access the property prop" + with pytest.raises(TypeError, match=msg): + delegate.prop + + @pytest.mark.skipif(PYPY, reason="not relevant for PyPy") + def test_memory_usage(self): + # Delegate does not implement memory_usage. + # Check that we fall back to in-built `__sizeof__` + # GH 12924 + delegate = self.Delegate(self.Delegator()) + sys.getsizeof(delegate) + + +class TestNoNewAttributesMixin: + def test_mixin(self): + class T(NoNewAttributesMixin): + pass + + t = T() + assert not hasattr(t, "__frozen") + + t.a = "test" + assert t.a == "test" + + t._freeze() + assert "__frozen" in dir(t) + assert getattr(t, "__frozen") + msg = "You cannot add any new attribute" + with pytest.raises(AttributeError, match=msg): + t.b = "test" + + assert not hasattr(t, "b") + + +class TestConstruction: + # test certain constructor behaviours on dtype inference across Series, + # Index and DataFrame + + @pytest.mark.parametrize( + "a", + [ + np.array(["2263-01-01"], dtype="datetime64[D]"), + np.array([datetime(2263, 1, 1)], dtype=object), + np.array([np.datetime64("2263-01-01", "D")], dtype=object), + np.array(["2263-01-01"], dtype=object), + ], + ids=[ + "datetime64[D]", + "object-datetime.datetime", + "object-numpy-scalar", + "object-string", + ], + ) + def test_constructor_datetime_outofbound( + self, a, constructor, request, using_infer_string + ): + # GH-26853 (+ bug GH-26206 out of bound non-ns unit) + + # No dtype specified (dtype inference) + # datetime64[non-ns] raise error, other cases result in object dtype + # and preserve original data + if a.dtype.kind == "M": + # Can't fit in nanosecond bounds -> get the nearest supported unit + result = constructor(a) + assert result.dtype == "M8[s]" + else: + result = constructor(a) + if using_infer_string and "object-string" in request.node.callspec.id: + assert result.dtype == "string" + else: + assert result.dtype == "object" + tm.assert_numpy_array_equal(result.to_numpy(), a) + + # Explicit dtype specified + # Forced conversion fails for all -> all cases raise error + msg = "Out of bounds|Out of bounds .* present at position 0" + with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg): + constructor(a, dtype="datetime64[ns]") + + def test_constructor_datetime_nonns(self, constructor): + arr = np.array(["2020-01-01T00:00:00.000000"], dtype="datetime64[us]") + dta = pd.core.arrays.DatetimeArray._simple_new(arr, dtype=arr.dtype) + expected = constructor(dta) + 
assert expected.dtype == arr.dtype + + result = constructor(arr) + tm.assert_equal(result, expected) + + # https://github.com/pandas-dev/pandas/issues/34843 + arr.flags.writeable = False + result = constructor(arr) + tm.assert_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/test_conversion.py b/venv/lib/python3.10/site-packages/pandas/tests/base/test_conversion.py new file mode 100644 index 0000000000000000000000000000000000000000..fe0f1f1454a55018ad5167d958c73dd6a3bc7018 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/base/test_conversion.py @@ -0,0 +1,562 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas import ( + CategoricalIndex, + Series, + Timedelta, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + SparseArray, + TimedeltaArray, +) +from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics + + +class TestToIterable: + # test that we convert an iterable to python types + + dtypes = [ + ("int8", int), + ("int16", int), + ("int32", int), + ("int64", int), + ("uint8", int), + ("uint16", int), + ("uint32", int), + ("uint64", int), + ("float16", float), + ("float32", float), + ("float64", float), + ("datetime64[ns]", Timestamp), + ("datetime64[ns, US/Eastern]", Timestamp), + ("timedelta64[ns]", Timedelta), + ] + + @pytest.mark.parametrize("dtype, rdtype", dtypes) + @pytest.mark.parametrize( + "method", + [ + lambda x: x.tolist(), + lambda x: x.to_list(), + lambda x: list(x), + lambda x: list(x.__iter__()), + ], + ids=["tolist", "to_list", "list", "iter"], + ) + def test_iterable(self, index_or_series, method, dtype, rdtype): + # gh-10904 + # gh-13258 + # coerce iteration to underlying python / pandas types + typ = index_or_series + if dtype == "float16" and issubclass(typ, pd.Index): + with pytest.raises(NotImplementedError, match="float16 indexes are not "): + typ([1], dtype=dtype) + return + s = typ([1], dtype=dtype) + result = method(s)[0] + assert isinstance(result, rdtype) + + @pytest.mark.parametrize( + "dtype, rdtype, obj", + [ + ("object", object, "a"), + ("object", int, 1), + ("category", object, "a"), + ("category", int, 1), + ], + ) + @pytest.mark.parametrize( + "method", + [ + lambda x: x.tolist(), + lambda x: x.to_list(), + lambda x: list(x), + lambda x: list(x.__iter__()), + ], + ids=["tolist", "to_list", "list", "iter"], + ) + def test_iterable_object_and_category( + self, index_or_series, method, dtype, rdtype, obj + ): + # gh-10904 + # gh-13258 + # coerce iteration to underlying python / pandas types + typ = index_or_series + s = typ([obj], dtype=dtype) + result = method(s)[0] + assert isinstance(result, rdtype) + + @pytest.mark.parametrize("dtype, rdtype", dtypes) + def test_iterable_items(self, dtype, rdtype): + # gh-13258 + # test if items yields the correct boxed scalars + # this only applies to series + s = Series([1], dtype=dtype) + _, result = next(iter(s.items())) + assert isinstance(result, rdtype) + + _, result = next(iter(s.items())) + assert isinstance(result, rdtype) + + @pytest.mark.parametrize( + "dtype, rdtype", dtypes + [("object", int), ("category", int)] + ) + def test_iterable_map(self, index_or_series, dtype, rdtype): + # gh-13236 + # coerce iteration to underlying python / pandas types + typ = index_or_series + if dtype == "float16" and issubclass(typ, pd.Index): + with 
pytest.raises(NotImplementedError, match="float16 indexes are not "): + typ([1], dtype=dtype) + return + s = typ([1], dtype=dtype) + result = s.map(type)[0] + if not isinstance(rdtype, tuple): + rdtype = (rdtype,) + assert result in rdtype + + @pytest.mark.parametrize( + "method", + [ + lambda x: x.tolist(), + lambda x: x.to_list(), + lambda x: list(x), + lambda x: list(x.__iter__()), + ], + ids=["tolist", "to_list", "list", "iter"], + ) + def test_categorial_datetimelike(self, method): + i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")]) + + result = method(i)[0] + assert isinstance(result, Timestamp) + + def test_iter_box_dt64(self, unit): + vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")] + ser = Series(vals).dt.as_unit(unit) + assert ser.dtype == f"datetime64[{unit}]" + for res, exp in zip(ser, vals): + assert isinstance(res, Timestamp) + assert res.tz is None + assert res == exp + assert res.unit == unit + + def test_iter_box_dt64tz(self, unit): + vals = [ + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + ] + ser = Series(vals).dt.as_unit(unit) + + assert ser.dtype == f"datetime64[{unit}, US/Eastern]" + for res, exp in zip(ser, vals): + assert isinstance(res, Timestamp) + assert res.tz == exp.tz + assert res == exp + assert res.unit == unit + + def test_iter_box_timedelta64(self, unit): + # timedelta + vals = [Timedelta("1 days"), Timedelta("2 days")] + ser = Series(vals).dt.as_unit(unit) + assert ser.dtype == f"timedelta64[{unit}]" + for res, exp in zip(ser, vals): + assert isinstance(res, Timedelta) + assert res == exp + assert res.unit == unit + + def test_iter_box_period(self): + # period + vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")] + s = Series(vals) + assert s.dtype == "Period[M]" + for res, exp in zip(s, vals): + assert isinstance(res, pd.Period) + assert res.freq == "ME" + assert res == exp + + +@pytest.mark.parametrize( + "arr, expected_type, dtype", + [ + (np.array([0, 1], dtype=np.int64), np.ndarray, "int64"), + (np.array(["a", "b"]), np.ndarray, "object"), + (pd.Categorical(["a", "b"]), pd.Categorical, "category"), + ( + pd.DatetimeIndex(["2017", "2018"], tz="US/Central"), + DatetimeArray, + "datetime64[ns, US/Central]", + ), + ( + pd.PeriodIndex([2018, 2019], freq="Y"), + PeriodArray, + pd.core.dtypes.dtypes.PeriodDtype("Y-DEC"), + ), + (pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval"), + ( + pd.DatetimeIndex(["2017", "2018"]), + DatetimeArray, + "datetime64[ns]", + ), + ( + pd.TimedeltaIndex([10**10]), + TimedeltaArray, + "m8[ns]", + ), + ], +) +def test_values_consistent(arr, expected_type, dtype, using_infer_string): + if using_infer_string and dtype == "object": + expected_type = ArrowStringArrayNumpySemantics + l_values = Series(arr)._values + r_values = pd.Index(arr)._values + assert type(l_values) is expected_type + assert type(l_values) is type(r_values) + + tm.assert_equal(l_values, r_values) + + +@pytest.mark.parametrize("arr", [np.array([1, 2, 3])]) +def test_numpy_array(arr): + ser = Series(arr) + result = ser.array + expected = NumpyExtensionArray(arr) + tm.assert_extension_array_equal(result, expected) + + +def test_numpy_array_all_dtypes(any_numpy_dtype): + ser = Series(dtype=any_numpy_dtype) + result = ser.array + if np.dtype(any_numpy_dtype).kind == "M": + assert isinstance(result, DatetimeArray) + elif np.dtype(any_numpy_dtype).kind == "m": + assert isinstance(result, TimedeltaArray) + else: + assert isinstance(result, 
NumpyExtensionArray) + + +@pytest.mark.parametrize( + "arr, attr", + [ + (pd.Categorical(["a", "b"]), "_codes"), + (PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]"), "_ndarray"), + (pd.array([0, np.nan], dtype="Int64"), "_data"), + (IntervalArray.from_breaks([0, 1]), "_left"), + (SparseArray([0, 1]), "_sparse_values"), + ( + DatetimeArray._from_sequence(np.array([1, 2], dtype="datetime64[ns]")), + "_ndarray", + ), + # tz-aware Datetime + ( + DatetimeArray._from_sequence( + np.array( + ["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]" + ), + dtype=DatetimeTZDtype(tz="US/Central"), + ), + "_ndarray", + ), + ], +) +def test_array(arr, attr, index_or_series, request): + box = index_or_series + + result = box(arr, copy=False).array + + if attr: + arr = getattr(arr, attr) + result = getattr(result, attr) + + assert result is arr + + +def test_array_multiindex_raises(): + idx = pd.MultiIndex.from_product([["A"], ["a", "b"]]) + msg = "MultiIndex has no single backing array" + with pytest.raises(ValueError, match=msg): + idx.array + + +@pytest.mark.parametrize( + "arr, expected", + [ + (np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)), + (pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)), + ( + pd.core.arrays.period_array(["2000", "2001"], freq="D"), + np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]), + ), + (pd.array([0, np.nan], dtype="Int64"), np.array([0, np.nan])), + ( + IntervalArray.from_breaks([0, 1, 2]), + np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object), + ), + (SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)), + # tz-naive datetime + ( + DatetimeArray._from_sequence(np.array(["2000", "2001"], dtype="M8[ns]")), + np.array(["2000", "2001"], dtype="M8[ns]"), + ), + # tz-aware stays tz`-aware + ( + DatetimeArray._from_sequence( + np.array(["2000-01-01T06:00:00", "2000-01-02T06:00:00"], dtype="M8[ns]") + ) + .tz_localize("UTC") + .tz_convert("US/Central"), + np.array( + [ + Timestamp("2000-01-01", tz="US/Central"), + Timestamp("2000-01-02", tz="US/Central"), + ] + ), + ), + # Timedelta + ( + TimedeltaArray._from_sequence( + np.array([0, 3600000000000], dtype="i8").view("m8[ns]") + ), + np.array([0, 3600000000000], dtype="m8[ns]"), + ), + # GH#26406 tz is preserved in Categorical[dt64tz] + ( + pd.Categorical(date_range("2016-01-01", periods=2, tz="US/Pacific")), + np.array( + [ + Timestamp("2016-01-01", tz="US/Pacific"), + Timestamp("2016-01-02", tz="US/Pacific"), + ] + ), + ), + ], +) +def test_to_numpy(arr, expected, index_or_series_or_array, request): + box = index_or_series_or_array + + with tm.assert_produces_warning(None): + thing = box(arr) + + result = thing.to_numpy() + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(thing) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("as_series", [True, False]) +@pytest.mark.parametrize( + "arr", [np.array([1, 2, 3], dtype="int64"), np.array(["a", "b", "c"], dtype=object)] +) +def test_to_numpy_copy(arr, as_series, using_infer_string): + obj = pd.Index(arr, copy=False) + if as_series: + obj = Series(obj.values, copy=False) + + # no copy by default + result = obj.to_numpy() + if using_infer_string and arr.dtype == object: + assert np.shares_memory(arr, result) is False + else: + assert np.shares_memory(arr, result) is True + + result = obj.to_numpy(copy=False) + if using_infer_string and arr.dtype == object: + assert np.shares_memory(arr, result) is False + else: + assert np.shares_memory(arr, 
result) is True + + # copy=True + result = obj.to_numpy(copy=True) + assert np.shares_memory(arr, result) is False + + +@pytest.mark.parametrize("as_series", [True, False]) +def test_to_numpy_dtype(as_series, unit): + tz = "US/Eastern" + obj = pd.DatetimeIndex(["2000", "2001"], tz=tz) + if as_series: + obj = Series(obj) + + # preserve tz by default + result = obj.to_numpy() + expected = np.array( + [Timestamp("2000", tz=tz), Timestamp("2001", tz=tz)], dtype=object + ) + tm.assert_numpy_array_equal(result, expected) + + result = obj.to_numpy(dtype="object") + tm.assert_numpy_array_equal(result, expected) + + result = obj.to_numpy(dtype="M8[ns]") + expected = np.array(["2000-01-01T05", "2001-01-01T05"], dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values, dtype, na_value, expected", + [ + ([1, 2, None], "float64", 0, [1.0, 2.0, 0.0]), + ( + [Timestamp("2000"), Timestamp("2000"), pd.NaT], + None, + Timestamp("2000"), + [np.datetime64("2000-01-01T00:00:00.000000000")] * 3, + ), + ], +) +def test_to_numpy_na_value_numpy_dtype( + index_or_series, values, dtype, na_value, expected +): + obj = index_or_series(values) + result = obj.to_numpy(dtype=dtype, na_value=na_value) + expected = np.array(expected) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "data, multiindex, dtype, na_value, expected", + [ + ( + [1, 2, None, 4], + [(0, "a"), (0, "b"), (1, "b"), (1, "c")], + float, + None, + [1.0, 2.0, np.nan, 4.0], + ), + ( + [1, 2, None, 4], + [(0, "a"), (0, "b"), (1, "b"), (1, "c")], + float, + np.nan, + [1.0, 2.0, np.nan, 4.0], + ), + ( + [1.0, 2.0, np.nan, 4.0], + [("a", 0), ("a", 1), ("a", 2), ("b", 0)], + int, + 0, + [1, 2, 0, 4], + ), + ( + [Timestamp("2000"), Timestamp("2000"), pd.NaT], + [(0, Timestamp("2021")), (0, Timestamp("2022")), (1, Timestamp("2000"))], + None, + Timestamp("2000"), + [np.datetime64("2000-01-01T00:00:00.000000000")] * 3, + ), + ], +) +def test_to_numpy_multiindex_series_na_value( + data, multiindex, dtype, na_value, expected +): + index = pd.MultiIndex.from_tuples(multiindex) + series = Series(data, index=index) + result = series.to_numpy(dtype=dtype, na_value=na_value) + expected = np.array(expected) + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_kwargs_raises(): + # numpy + s = Series([1, 2, 3]) + msg = r"to_numpy\(\) got an unexpected keyword argument 'foo'" + with pytest.raises(TypeError, match=msg): + s.to_numpy(foo=True) + + # extension + s = Series([1, 2, 3], dtype="Int64") + with pytest.raises(TypeError, match=msg): + s.to_numpy(foo=True) + + +@pytest.mark.parametrize( + "data", + [ + {"a": [1, 2, 3], "b": [1, 2, None]}, + {"a": np.array([1, 2, 3]), "b": np.array([1, 2, np.nan])}, + {"a": pd.array([1, 2, 3]), "b": pd.array([1, 2, None])}, + ], +) +@pytest.mark.parametrize("dtype, na_value", [(float, np.nan), (object, None)]) +def test_to_numpy_dataframe_na_value(data, dtype, na_value): + # https://github.com/pandas-dev/pandas/issues/33820 + df = pd.DataFrame(data) + result = df.to_numpy(dtype=dtype, na_value=na_value) + expected = np.array([[1, 1], [2, 2], [3, na_value]], dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "data, expected", + [ + ( + {"a": pd.array([1, 2, None])}, + np.array([[1.0], [2.0], [np.nan]], dtype=float), + ), + ( + {"a": [1, 2, 3], "b": [1, 2, 3]}, + np.array([[1, 1], [2, 2], [3, 3]], dtype=float), + ), + ], +) +def test_to_numpy_dataframe_single_block(data, expected): + # 
https://github.com/pandas-dev/pandas/issues/33820 + df = pd.DataFrame(data) + result = df.to_numpy(dtype=float, na_value=np.nan) + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_dataframe_single_block_no_mutate(): + # https://github.com/pandas-dev/pandas/issues/33820 + result = pd.DataFrame(np.array([1.0, 2.0, np.nan])) + expected = pd.DataFrame(np.array([1.0, 2.0, np.nan])) + result.to_numpy(na_value=0.0) + tm.assert_frame_equal(result, expected) + + +class TestAsArray: + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_asarray_object_dt64(self, tz): + ser = Series(date_range("2000", periods=2, tz=tz)) + + with tm.assert_produces_warning(None): + # Future behavior (for tzaware case) with no warning + result = np.asarray(ser, dtype=object) + + expected = np.array( + [Timestamp("2000-01-01", tz=tz), Timestamp("2000-01-02", tz=tz)] + ) + tm.assert_numpy_array_equal(result, expected) + + def test_asarray_tz_naive(self): + # This shouldn't produce a warning. + ser = Series(date_range("2000", periods=2)) + expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]") + result = np.asarray(ser) + + tm.assert_numpy_array_equal(result, expected) + + def test_asarray_tz_aware(self): + tz = "US/Central" + ser = Series(date_range("2000", periods=2, tz=tz)) + expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]") + result = np.asarray(ser, dtype="datetime64[ns]") + + tm.assert_numpy_array_equal(result, expected) + + # Old behavior with no warning + result = np.asarray(ser, dtype="M8[ns]") + + tm.assert_numpy_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/test_fillna.py b/venv/lib/python3.10/site-packages/pandas/tests/base/test_fillna.py new file mode 100644 index 0000000000000000000000000000000000000000..7300d3013305a7ca08312ae85cc42ae8950acf23 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/base/test_fillna.py @@ -0,0 +1,60 @@ +""" +Though Index.fillna and Series.fillna has separate impl, +test here to confirm these works as the same +""" + +import numpy as np +import pytest + +from pandas import MultiIndex +import pandas._testing as tm +from pandas.tests.base.common import allow_na_ops + + +def test_fillna(index_or_series_obj): + # GH 11343 + obj = index_or_series_obj + + if isinstance(obj, MultiIndex): + msg = "isna is not defined for MultiIndex" + with pytest.raises(NotImplementedError, match=msg): + obj.fillna(0) + return + + # values will not be changed + fill_value = obj.values[0] if len(obj) > 0 else 0 + result = obj.fillna(fill_value) + + tm.assert_equal(obj, result) + + # check shallow_copied + assert obj is not result + + +@pytest.mark.parametrize("null_obj", [np.nan, None]) +def test_fillna_null(null_obj, index_or_series_obj): + # GH 11343 + obj = index_or_series_obj + klass = type(obj) + + if not allow_na_ops(obj): + pytest.skip(f"{klass} doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(obj, MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + fill_value = values[0] + expected = values.copy() + values[0:2] = null_obj + expected[0:2] = fill_value + + expected = klass(expected) + obj = klass(values) + + result = obj.fillna(fill_value) + tm.assert_equal(result, expected) + + # check shallow_copied + assert obj is not result diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/test_misc.py 
b/venv/lib/python3.10/site-packages/pandas/tests/base/test_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..65e234e799353844bab2a63df582adfa5842d2cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/base/test_misc.py @@ -0,0 +1,191 @@ +import sys + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas.compat import PYPY + +from pandas.core.dtypes.common import ( + is_dtype_equal, + is_object_dtype, +) + +import pandas as pd +from pandas import ( + Index, + Series, +) +import pandas._testing as tm + + +def test_isnull_notnull_docstrings(): + # GH#41855 make sure its clear these are aliases + doc = pd.DataFrame.notnull.__doc__ + assert doc.startswith("\nDataFrame.notnull is an alias for DataFrame.notna.\n") + doc = pd.DataFrame.isnull.__doc__ + assert doc.startswith("\nDataFrame.isnull is an alias for DataFrame.isna.\n") + + doc = Series.notnull.__doc__ + assert doc.startswith("\nSeries.notnull is an alias for Series.notna.\n") + doc = Series.isnull.__doc__ + assert doc.startswith("\nSeries.isnull is an alias for Series.isna.\n") + + +@pytest.mark.parametrize( + "op_name, op", + [ + ("add", "+"), + ("sub", "-"), + ("mul", "*"), + ("mod", "%"), + ("pow", "**"), + ("truediv", "/"), + ("floordiv", "//"), + ], +) +def test_binary_ops_docstring(frame_or_series, op_name, op): + # not using the all_arithmetic_functions fixture with _get_opstr + # as _get_opstr is used internally in the dynamic implementation of the docstring + klass = frame_or_series + + operand1 = klass.__name__.lower() + operand2 = "other" + expected_str = " ".join([operand1, op, operand2]) + assert expected_str in getattr(klass, op_name).__doc__ + + # reverse version of the binary ops + expected_str = " ".join([operand2, op, operand1]) + assert expected_str in getattr(klass, "r" + op_name).__doc__ + + +def test_ndarray_compat_properties(index_or_series_obj): + obj = index_or_series_obj + + # Check that we work. 
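# The ndarray-compatible surface this test pins down, on a concrete Series:
import pandas as pd

s = pd.Series([1, 2, 3])
assert s.shape == (3,) and s.ndim == 1 and s.size == 3
assert s.nbytes == s.to_numpy().nbytes   # 3 x int64 -> 24 bytes
assert pd.Series([7]).item() == 7        # .item() requires length 1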
+ for p in ["shape", "dtype", "T", "nbytes"]: + assert getattr(obj, p, None) is not None + + # deprecated properties + for p in ["strides", "itemsize", "base", "data"]: + assert not hasattr(obj, p) + + msg = "can only convert an array of size 1 to a Python scalar" + with pytest.raises(ValueError, match=msg): + obj.item() # len > 1 + + assert obj.ndim == 1 + assert obj.size == len(obj) + + assert Index([1]).item() == 1 + assert Series([1]).item() == 1 + + +@pytest.mark.skipif( + PYPY or using_pyarrow_string_dtype(), + reason="not relevant for PyPy doesn't work properly for arrow strings", +) +def test_memory_usage(index_or_series_memory_obj): + obj = index_or_series_memory_obj + # Clear index caches so that len(obj) == 0 report 0 memory usage + if isinstance(obj, Series): + is_ser = True + obj.index._engine.clear_mapping() + else: + is_ser = False + obj._engine.clear_mapping() + + res = obj.memory_usage() + res_deep = obj.memory_usage(deep=True) + + is_object = is_object_dtype(obj) or (is_ser and is_object_dtype(obj.index)) + is_categorical = isinstance(obj.dtype, pd.CategoricalDtype) or ( + is_ser and isinstance(obj.index.dtype, pd.CategoricalDtype) + ) + is_object_string = is_dtype_equal(obj, "string[python]") or ( + is_ser and is_dtype_equal(obj.index.dtype, "string[python]") + ) + + if len(obj) == 0: + expected = 0 + assert res_deep == res == expected + elif is_object or is_categorical or is_object_string: + # only deep will pick them up + assert res_deep > res + else: + assert res == res_deep + + # sys.getsizeof will call the .memory_usage with + # deep=True, and add on some GC overhead + diff = res_deep - sys.getsizeof(obj) + assert abs(diff) < 100 + + +def test_memory_usage_components_series(series_with_simple_index): + series = series_with_simple_index + total_usage = series.memory_usage(index=True) + non_index_usage = series.memory_usage(index=False) + index_usage = series.index.memory_usage() + assert total_usage == non_index_usage + index_usage + + +@pytest.mark.parametrize("dtype", tm.NARROW_NP_DTYPES) +def test_memory_usage_components_narrow_series(dtype): + series = Series(range(5), dtype=dtype, index=[f"i-{i}" for i in range(5)], name="a") + total_usage = series.memory_usage(index=True) + non_index_usage = series.memory_usage(index=False) + index_usage = series.index.memory_usage() + assert total_usage == non_index_usage + index_usage + + +def test_searchsorted(request, index_or_series_obj): + # numpy.searchsorted calls obj.searchsorted under the hood. + # See gh-12238 + obj = index_or_series_obj + + if isinstance(obj, pd.MultiIndex): + # See gh-14833 + request.applymarker( + pytest.mark.xfail( + reason="np.searchsorted doesn't work on pd.MultiIndex: GH 14833" + ) + ) + elif obj.dtype.kind == "c" and isinstance(obj, Index): + # TODO: Should Series cases also raise? 
Looks like they use numpy + # comparison semantics https://github.com/numpy/numpy/issues/15981 + mark = pytest.mark.xfail(reason="complex objects are not comparable") + request.applymarker(mark) + + max_obj = max(obj, default=0) + index = np.searchsorted(obj, max_obj) + assert 0 <= index <= len(obj) + + index = np.searchsorted(obj, max_obj, sorter=range(len(obj))) + assert 0 <= index <= len(obj) + + +def test_access_by_position(index_flat): + index = index_flat + + if len(index) == 0: + pytest.skip("Test doesn't make sense on empty data") + + series = Series(index) + assert index[0] == series.iloc[0] + assert index[5] == series.iloc[5] + assert index[-1] == series.iloc[-1] + + size = len(index) + assert index[-1] == index[size - 1] + + msg = f"index {size} is out of bounds for axis 0 with size {size}" + if is_dtype_equal(index.dtype, "string[pyarrow]") or is_dtype_equal( + index.dtype, "string[pyarrow_numpy]" + ): + msg = "index out of bounds" + with pytest.raises(IndexError, match=msg): + index[size] + msg = "single positional indexer is out-of-bounds" + with pytest.raises(IndexError, match=msg): + series.iloc[size] diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py b/venv/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py new file mode 100644 index 0000000000000000000000000000000000000000..246f33d27476cb419620fb8571984619785f9b62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py @@ -0,0 +1,56 @@ +import numpy as np +import pytest + +from pandas import ( + CategoricalDtype, + DataFrame, +) +import pandas._testing as tm + + +def test_transpose(index_or_series_obj): + obj = index_or_series_obj + tm.assert_equal(obj.transpose(), obj) + + +def test_transpose_non_default_axes(index_or_series_obj): + msg = "the 'axes' parameter is not supported" + obj = index_or_series_obj + with pytest.raises(ValueError, match=msg): + obj.transpose(1) + with pytest.raises(ValueError, match=msg): + obj.transpose(axes=1) + + +def test_numpy_transpose(index_or_series_obj): + msg = "the 'axes' parameter is not supported" + obj = index_or_series_obj + tm.assert_equal(np.transpose(obj), obj) + + with pytest.raises(ValueError, match=msg): + np.transpose(obj, axes=1) + + +@pytest.mark.parametrize( + "data, transposed_data, index, columns, dtype", + [ + ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], int), + ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], CategoricalDtype([1, 2])), + ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], int), + ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], CategoricalDtype([1, 2])), + ([[1, 2], [3, 4]], [[1, 3], [2, 4]], ["a", "a"], ["b", "b"], int), + ( + [[1, 2], [3, 4]], + [[1, 3], [2, 4]], + ["a", "a"], + ["b", "b"], + CategoricalDtype([1, 2, 3, 4]), + ), + ], +) +def test_duplicate_labels(data, transposed_data, index, columns, dtype): + # GH 42380 + df = DataFrame(data, index=index, columns=columns, dtype=dtype) + result = df.T + expected = DataFrame(transposed_data, index=columns, columns=index, dtype=dtype) + tm.assert_frame_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/test_unique.py b/venv/lib/python3.10/site-packages/pandas/tests/base/test_unique.py new file mode 100644 index 0000000000000000000000000000000000000000..d3fe144f70cfc2b54d978ab80ba23ef896948b9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/base/test_unique.py @@ -0,0 +1,124 @@ +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +import pandas as pd 
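# The dispatch checked in test_searchsorted above, on a concrete example:
# numpy hands the call back to the pandas object's own searchsorted.
import numpy as np
import pandas as pd

ser = pd.Series([1, 3, 5, 7])
assert ser.searchsorted(4) == 2       # insertion point that keeps order
assert np.searchsorted(ser, 4) == 2   # same answer via the numpy entry point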
+import pandas._testing as tm +from pandas.tests.base.common import allow_na_ops + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_unique(index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + result = obj.unique() + + # dict.fromkeys preserves the order + unique_values = list(dict.fromkeys(obj.values)) + if isinstance(obj, pd.MultiIndex): + expected = pd.MultiIndex.from_tuples(unique_values) + expected.names = obj.names + tm.assert_index_equal(result, expected, exact=True) + elif isinstance(obj, pd.Index): + expected = pd.Index(unique_values, dtype=obj.dtype) + if isinstance(obj.dtype, pd.DatetimeTZDtype): + expected = expected.normalize() + tm.assert_index_equal(result, expected, exact=True) + else: + expected = np.array(unique_values) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +@pytest.mark.parametrize("null_obj", [np.nan, None]) +def test_unique_null(null_obj, index_or_series_obj): + obj = index_or_series_obj + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(obj, pd.MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + result = obj.unique() + + unique_values_raw = dict.fromkeys(obj.values) + # because np.nan == np.nan is False, but None == None is True + # np.nan would be duplicated, whereas None wouldn't + unique_values_not_null = [val for val in unique_values_raw if not pd.isnull(val)] + unique_values = [null_obj] + unique_values_not_null + + if isinstance(obj, pd.Index): + expected = pd.Index(unique_values, dtype=obj.dtype) + if isinstance(obj.dtype, pd.DatetimeTZDtype): + result = result.normalize() + expected = expected.normalize() + tm.assert_index_equal(result, expected, exact=True) + else: + expected = np.array(unique_values, dtype=obj.dtype) + tm.assert_numpy_array_equal(result, expected) + + +def test_nunique(index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + expected = len(obj.unique()) + assert obj.nunique(dropna=False) == expected + + +@pytest.mark.parametrize("null_obj", [np.nan, None]) +def test_nunique_null(null_obj, index_or_series_obj): + obj = index_or_series_obj + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif isinstance(obj, pd.MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + + if isinstance(obj, pd.CategoricalIndex): + assert obj.nunique() == len(obj.categories) + assert obj.nunique(dropna=False) == len(obj.categories) + 1 + else: + num_unique_values = len(obj.unique()) + assert obj.nunique() == max(0, num_unique_values - 1) + assert obj.nunique(dropna=False) == max(0, num_unique_values) + + +@pytest.mark.single_cpu +@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="decoding fails") +def test_unique_bad_unicode(index_or_series): + # regression test for #34550 + uval = "\ud83d" # smiley emoji + + obj = index_or_series([uval] * 2) + result = obj.unique() + + if 
isinstance(obj, pd.Index): + expected = pd.Index(["\ud83d"], dtype=object) + tm.assert_index_equal(result, expected, exact=True) + else: + expected = np.array(["\ud83d"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_nunique_dropna(dropna): + # GH37566 + ser = pd.Series(["yes", "yes", pd.NA, np.nan, None, pd.NaT]) + res = ser.nunique(dropna) + assert res == 1 if dropna else 5 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/base/test_value_counts.py b/venv/lib/python3.10/site-packages/pandas/tests/base/test_value_counts.py new file mode 100644 index 0000000000000000000000000000000000000000..27296663988774ff01af6497060b63019c7deeb9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/base/test_value_counts.py @@ -0,0 +1,356 @@ +import collections +from datetime import timedelta + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DatetimeIndex, + Index, + Interval, + IntervalIndex, + MultiIndex, + Series, + Timedelta, + TimedeltaIndex, + array, +) +import pandas._testing as tm +from pandas.tests.base.common import allow_na_ops + + +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_value_counts(index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + result = obj.value_counts() + + counter = collections.Counter(obj) + expected = Series(dict(counter.most_common()), dtype=np.int64, name="count") + + if obj.dtype != np.float16: + expected.index = expected.index.astype(obj.dtype) + else: + with pytest.raises(NotImplementedError, match="float16 indexes are not "): + expected.index.astype(obj.dtype) + return + if isinstance(expected.index, MultiIndex): + expected.index.names = obj.names + else: + expected.index.name = obj.name + + if not isinstance(result.dtype, np.dtype): + if getattr(obj.dtype, "storage", "") == "pyarrow": + expected = expected.astype("int64[pyarrow]") + else: + # i.e IntegerDtype + expected = expected.astype("Int64") + + # TODO(GH#32514): Order of entries with the same count is inconsistent + # on CI (gh-32449) + if obj.duplicated().any(): + result = result.sort_index() + expected = expected.sort_index() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("null_obj", [np.nan, None]) +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +def test_value_counts_null(null_obj, index_or_series_obj): + orig = index_or_series_obj + obj = orig.copy() + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(orig, MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj._values + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + + # because np.nan == np.nan is False, but None == None is True + # np.nan would be duplicated, whereas None wouldn't + counter = collections.Counter(obj.dropna()) + expected = Series(dict(counter.most_common()), dtype=np.int64, name="count") + + if obj.dtype != np.float16: + expected.index = expected.index.astype(obj.dtype) + else: + with pytest.raises(NotImplementedError, match="float16 indexes are not "): + expected.index.astype(obj.dtype) + return + expected.index.name = obj.name + + result = obj.value_counts() + if 
obj.duplicated().any(): + # TODO(GH#32514): + # Order of entries with the same count is inconsistent on CI (gh-32449) + expected = expected.sort_index() + result = result.sort_index() + + if not isinstance(result.dtype, np.dtype): + if getattr(obj.dtype, "storage", "") == "pyarrow": + expected = expected.astype("int64[pyarrow]") + else: + # i.e IntegerDtype + expected = expected.astype("Int64") + tm.assert_series_equal(result, expected) + + expected[null_obj] = 3 + + result = obj.value_counts(dropna=False) + if obj.duplicated().any(): + # TODO(GH#32514): + # Order of entries with the same count is inconsistent on CI (gh-32449) + expected = expected.sort_index() + result = result.sort_index() + tm.assert_series_equal(result, expected) + + +def test_value_counts_inferred(index_or_series, using_infer_string): + klass = index_or_series + s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"] + s = klass(s_values) + expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"], name="count") + tm.assert_series_equal(s.value_counts(), expected) + + if isinstance(s, Index): + exp = Index(np.unique(np.array(s_values, dtype=np.object_))) + tm.assert_index_equal(s.unique(), exp) + else: + exp = np.unique(np.array(s_values, dtype=np.object_)) + if using_infer_string: + exp = array(exp) + tm.assert_equal(s.unique(), exp) + + assert s.nunique() == 4 + # don't sort, have to sort after the fact as not sorting is + # platform-dep + hist = s.value_counts(sort=False).sort_values() + expected = Series([3, 1, 4, 2], index=list("acbd"), name="count").sort_values() + tm.assert_series_equal(hist, expected) + + # sort ascending + hist = s.value_counts(ascending=True) + expected = Series([1, 2, 3, 4], index=list("cdab"), name="count") + tm.assert_series_equal(hist, expected) + + # relative histogram. 
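# The dropna/normalize semantics these tests exercise, in miniature:
import numpy as np
import pandas as pd

s = pd.Series(["a", "a", "b", np.nan])
assert s.value_counts().to_dict() == {"a": 2, "b": 1}   # NaN dropped by default
assert s.value_counts(dropna=False).sum() == 4          # NaN gets its own row
assert abs(s.value_counts(normalize=True)["a"] - 2 / 3) < 1e-12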
+ hist = s.value_counts(normalize=True) + expected = Series( + [0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"], name="proportion" + ) + tm.assert_series_equal(hist, expected) + + +def test_value_counts_bins(index_or_series, using_infer_string): + klass = index_or_series + s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"] + s = klass(s_values) + + # bins + msg = "bins argument only works with numeric data" + with pytest.raises(TypeError, match=msg): + s.value_counts(bins=1) + + s1 = Series([1, 1, 2, 3]) + res1 = s1.value_counts(bins=1) + exp1 = Series({Interval(0.997, 3.0): 4}, name="count") + tm.assert_series_equal(res1, exp1) + res1n = s1.value_counts(bins=1, normalize=True) + exp1n = Series({Interval(0.997, 3.0): 1.0}, name="proportion") + tm.assert_series_equal(res1n, exp1n) + + if isinstance(s1, Index): + tm.assert_index_equal(s1.unique(), Index([1, 2, 3])) + else: + exp = np.array([1, 2, 3], dtype=np.int64) + tm.assert_numpy_array_equal(s1.unique(), exp) + + assert s1.nunique() == 3 + + # these return the same + res4 = s1.value_counts(bins=4, dropna=True) + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]), name="count") + tm.assert_series_equal(res4, exp4) + + res4 = s1.value_counts(bins=4, dropna=False) + intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0]) + exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]), name="count") + tm.assert_series_equal(res4, exp4) + + res4n = s1.value_counts(bins=4, normalize=True) + exp4n = Series( + [0.5, 0.25, 0.25, 0], index=intervals.take([0, 1, 3, 2]), name="proportion" + ) + tm.assert_series_equal(res4n, exp4n) + + # handle NA's properly + s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"] + s = klass(s_values) + expected = Series([4, 3, 2], index=["b", "a", "d"], name="count") + tm.assert_series_equal(s.value_counts(), expected) + + if isinstance(s, Index): + exp = Index(["a", "b", np.nan, "d"]) + tm.assert_index_equal(s.unique(), exp) + else: + exp = np.array(["a", "b", np.nan, "d"], dtype=object) + if using_infer_string: + exp = array(exp) + tm.assert_equal(s.unique(), exp) + assert s.nunique() == 3 + + s = klass({}) if klass is dict else klass({}, dtype=object) + expected = Series([], dtype=np.int64, name="count") + tm.assert_series_equal(s.value_counts(), expected, check_index_type=False) + # returned dtype differs depending on original + if isinstance(s, Index): + tm.assert_index_equal(s.unique(), Index([]), exact=False) + else: + tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False) + + assert s.nunique() == 0 + + +def test_value_counts_datetime64(index_or_series, unit): + klass = index_or_series + + # GH 3002, datetime64[ns] + # don't test names though + df = pd.DataFrame( + { + "person_id": ["xxyyzz", "xxyyzz", "xxyyzz", "xxyyww", "foofoo", "foofoo"], + "dt": pd.to_datetime( + [ + "2010-01-01", + "2010-01-01", + "2010-01-01", + "2009-01-01", + "2008-09-09", + "2008-09-09", + ] + ).as_unit(unit), + "food": ["PIE", "GUM", "EGG", "EGG", "PIE", "GUM"], + } + ) + + s = klass(df["dt"].copy()) + s.name = None + idx = pd.to_datetime( + ["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"] + ).as_unit(unit) + expected_s = Series([3, 2, 1], index=idx, name="count") + tm.assert_series_equal(s.value_counts(), expected_s) + + expected = array( + np.array( + ["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"], + dtype=f"datetime64[{unit}]", + ) + ) + result = 
s.unique() + if isinstance(s, Index): + tm.assert_index_equal(result, DatetimeIndex(expected)) + else: + tm.assert_extension_array_equal(result, expected) + + assert s.nunique() == 3 + + # with NaT + s = df["dt"].copy() + s = klass(list(s.values) + [pd.NaT] * 4) + if klass is Series: + s = s.dt.as_unit(unit) + else: + s = s.as_unit(unit) + + result = s.value_counts() + assert result.index.dtype == f"datetime64[{unit}]" + tm.assert_series_equal(result, expected_s) + + result = s.value_counts(dropna=False) + expected_s = pd.concat( + [ + Series([4], index=DatetimeIndex([pd.NaT]).as_unit(unit), name="count"), + expected_s, + ] + ) + tm.assert_series_equal(result, expected_s) + + assert s.dtype == f"datetime64[{unit}]" + unique = s.unique() + assert unique.dtype == f"datetime64[{unit}]" + + # numpy_array_equal cannot compare pd.NaT + if isinstance(s, Index): + exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT]).as_unit(unit) + tm.assert_index_equal(unique, exp_idx) + else: + tm.assert_extension_array_equal(unique[:3], expected) + assert pd.isna(unique[3]) + + assert s.nunique() == 3 + assert s.nunique(dropna=False) == 4 + + +def test_value_counts_timedelta64(index_or_series, unit): + # timedelta64[ns] + klass = index_or_series + + day = Timedelta(timedelta(1)).as_unit(unit) + tdi = TimedeltaIndex([day], name="dt").as_unit(unit) + + tdvals = np.zeros(6, dtype=f"m8[{unit}]") + day + td = klass(tdvals, name="dt") + + result = td.value_counts() + expected_s = Series([6], index=tdi, name="count") + tm.assert_series_equal(result, expected_s) + + expected = tdi + result = td.unique() + if isinstance(td, Index): + tm.assert_index_equal(result, expected) + else: + tm.assert_extension_array_equal(result, expected._values) + + td2 = day + np.zeros(6, dtype=f"m8[{unit}]") + td2 = klass(td2, name="dt") + result2 = td2.value_counts() + tm.assert_series_equal(result2, expected_s) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_value_counts_with_nan(dropna, index_or_series): + # GH31944 + klass = index_or_series + values = [True, pd.NA, np.nan] + obj = klass(values) + res = obj.value_counts(dropna=dropna) + if dropna is True: + expected = Series([1], index=Index([True], dtype=obj.dtype), name="count") + else: + expected = Series([1, 1, 1], index=[True, pd.NA, np.nan], name="count") + tm.assert_series_equal(res, expected) + + +def test_value_counts_object_inference_deprecated(): + # GH#56161 + dti = pd.date_range("2016-01-01", periods=3, tz="UTC") + + idx = dti.astype(object) + msg = "The behavior of value_counts with object-dtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = idx.value_counts() + + exp = dti.value_counts() + tm.assert_series_equal(res, exp) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_constructors.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..1aa458a62502890f56d9e00af02dd4d7b777a5bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_constructors.py @@ -0,0 +1,382 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + Period, + PeriodIndex, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + +# ----------------------------------------------------------------------------- +# Copy/view behaviour for Series / DataFrame 
constructors + + +@pytest.mark.parametrize("dtype", [None, "int64"]) +def test_series_from_series(dtype, using_copy_on_write, warn_copy_on_write): + # Case: constructing a Series from another Series object follows CoW rules: + # a new object is returned and thus mutations are not propagated + ser = Series([1, 2, 3], name="name") + + # default is copy=False -> new Series is a shallow copy / view of original + result = Series(ser, dtype=dtype) + + # the shallow copy still shares memory + assert np.shares_memory(get_array(ser), get_array(result)) + + if using_copy_on_write: + assert result._mgr.blocks[0].refs.has_reference() + + if using_copy_on_write: + # mutating new series copy doesn't mutate original + result.iloc[0] = 0 + assert ser.iloc[0] == 1 + # mutating triggered a copy-on-write -> no longer shares memory + assert not np.shares_memory(get_array(ser), get_array(result)) + else: + # mutating shallow copy does mutate original + with tm.assert_cow_warning(warn_copy_on_write): + result.iloc[0] = 0 + assert ser.iloc[0] == 0 + # and still shares memory + assert np.shares_memory(get_array(ser), get_array(result)) + + # the same when modifying the parent + result = Series(ser, dtype=dtype) + + if using_copy_on_write: + # mutating original doesn't mutate new series + ser.iloc[0] = 0 + assert result.iloc[0] == 1 + else: + # mutating original does mutate shallow copy + with tm.assert_cow_warning(warn_copy_on_write): + ser.iloc[0] = 0 + assert result.iloc[0] == 0 + + +def test_series_from_series_with_reindex(using_copy_on_write, warn_copy_on_write): + # Case: constructing a Series from another Series with specifying an index + # that potentially requires a reindex of the values + ser = Series([1, 2, 3], name="name") + + # passing an index that doesn't actually require a reindex of the values + # -> without CoW we get an actual mutating view + for index in [ + ser.index, + ser.index.copy(), + list(ser.index), + ser.index.rename("idx"), + ]: + result = Series(ser, index=index) + assert np.shares_memory(ser.values, result.values) + with tm.assert_cow_warning(warn_copy_on_write): + result.iloc[0] = 0 + if using_copy_on_write: + assert ser.iloc[0] == 1 + else: + assert ser.iloc[0] == 0 + + # ensure that if an actual reindex is needed, we don't have any refs + # (mutating the result wouldn't trigger CoW) + result = Series(ser, index=[0, 1, 2, 3]) + assert not np.shares_memory(ser.values, result.values) + if using_copy_on_write: + assert not result._mgr.blocks[0].refs.has_reference() + + +@pytest.mark.parametrize("fastpath", [False, True]) +@pytest.mark.parametrize("dtype", [None, "int64"]) +@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)]) +@pytest.mark.parametrize( + "arr", [np.array([1, 2, 3], dtype="int64"), pd.array([1, 2, 3], dtype="Int64")] +) +def test_series_from_array(using_copy_on_write, idx, dtype, fastpath, arr): + if idx is None or dtype is not None: + fastpath = False + msg = "The 'fastpath' keyword in pd.Series is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + ser = Series(arr, dtype=dtype, index=idx, fastpath=fastpath) + ser_orig = ser.copy() + data = getattr(arr, "_data", arr) + if using_copy_on_write: + assert not np.shares_memory(get_array(ser), data) + else: + assert np.shares_memory(get_array(ser), data) + + arr[0] = 100 + if using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + else: + expected = Series([100, 2, 3], dtype=dtype if dtype is not None else arr.dtype) + tm.assert_series_equal(ser, expected) + + 
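+
+# Editor's note: an illustrative, self-contained sketch (not part of the
+# upstream pandas test suite) of the copy-on-write behaviour the constructor
+# tests above assert. It assumes a pandas version that exposes the
+# "mode.copy_on_write" option; the helper name `_cow_construction_demo` is
+# ours, chosen for illustration only.
+def _cow_construction_demo():
+    import numpy as np
+    import pandas as pd
+
+    with pd.option_context("mode.copy_on_write", True):
+        parent = pd.Series([1, 2, 3])
+        child = pd.Series(parent)  # shallow copy: initially shares memory
+        assert np.shares_memory(parent.values, child.values)
+        child.iloc[0] = 0  # the first write triggers the lazy copy
+        assert parent.iloc[0] == 1  # the parent is left untouched
+        assert not np.shares_memory(parent.values, child.values)
+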
+@pytest.mark.parametrize("copy", [True, False, None]) +def test_series_from_array_different_dtype(using_copy_on_write, copy): + arr = np.array([1, 2, 3], dtype="int64") + ser = Series(arr, dtype="int32", copy=copy) + assert not np.shares_memory(get_array(ser), arr) + + +@pytest.mark.parametrize( + "idx", + [ + Index([1, 2]), + DatetimeIndex([Timestamp("2019-12-31"), Timestamp("2020-12-31")]), + PeriodIndex([Period("2019-12-31"), Period("2020-12-31")]), + TimedeltaIndex([Timedelta("1 days"), Timedelta("2 days")]), + ], +) +def test_series_from_index(using_copy_on_write, idx): + ser = Series(idx) + expected = idx.copy(deep=True) + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(idx)) + assert not ser._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(ser), get_array(idx)) + ser.iloc[0] = ser.iloc[1] + tm.assert_index_equal(idx, expected) + + +def test_series_from_index_different_dtypes(using_copy_on_write): + idx = Index([1, 2, 3], dtype="int64") + ser = Series(idx, dtype="int32") + assert not np.shares_memory(get_array(ser), get_array(idx)) + if using_copy_on_write: + assert ser._mgr._has_no_reference(0) + + +@pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning") +@pytest.mark.parametrize("fastpath", [False, True]) +@pytest.mark.parametrize("dtype", [None, "int64"]) +@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)]) +def test_series_from_block_manager(using_copy_on_write, idx, dtype, fastpath): + ser = Series([1, 2, 3], dtype="int64") + ser_orig = ser.copy() + msg = "The 'fastpath' keyword in pd.Series is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + ser2 = Series(ser._mgr, dtype=dtype, fastpath=fastpath, index=idx) + assert np.shares_memory(get_array(ser), get_array(ser2)) + if using_copy_on_write: + assert not ser2._mgr._has_no_reference(0) + + ser2.iloc[0] = 100 + if using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + else: + expected = Series([100, 2, 3]) + tm.assert_series_equal(ser, expected) + + +def test_series_from_block_manager_different_dtype(using_copy_on_write): + ser = Series([1, 2, 3], dtype="int64") + msg = "Passing a SingleBlockManager to Series" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + ser2 = Series(ser._mgr, dtype="int32") + assert not np.shares_memory(get_array(ser), get_array(ser2)) + if using_copy_on_write: + assert ser2._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("use_mgr", [True, False]) +@pytest.mark.parametrize("columns", [None, ["a"]]) +def test_dataframe_constructor_mgr_or_df( + using_copy_on_write, warn_copy_on_write, columns, use_mgr +): + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + + if use_mgr: + data = df._mgr + warn = DeprecationWarning + else: + data = df + warn = None + msg = "Passing a BlockManager to DataFrame" + with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False): + new_df = DataFrame(data) + + assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a")) + with tm.assert_cow_warning(warn_copy_on_write and not use_mgr): + new_df.iloc[0] = 100 + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), get_array(new_df, "a")) + tm.assert_frame_equal(df, df_orig) + else: + assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a")) + tm.assert_frame_equal(df, new_df) + + +@pytest.mark.parametrize("dtype", [None, "int64", "Int64"]) +@pytest.mark.parametrize("index", [None, [0, 1, 2]]) 
+@pytest.mark.parametrize("columns", [None, ["a", "b"], ["a", "b", "c"]]) +def test_dataframe_from_dict_of_series( + request, using_copy_on_write, warn_copy_on_write, columns, index, dtype +): + # Case: constructing a DataFrame from Series objects with copy=False + # has to do a lazy following CoW rules + # (the default for DataFrame(dict) is still to copy to ensure consolidation) + s1 = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + s1_orig = s1.copy() + expected = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6]}, index=index, columns=columns, dtype=dtype + ) + + result = DataFrame( + {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False + ) + + # the shallow copy still shares memory + assert np.shares_memory(get_array(result, "a"), get_array(s1)) + + # mutating the new dataframe doesn't mutate original + with tm.assert_cow_warning(warn_copy_on_write): + result.iloc[0, 0] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(s1)) + tm.assert_series_equal(s1, s1_orig) + else: + assert s1.iloc[0] == 10 + + # the same when modifying the parent series + s1 = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + result = DataFrame( + {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False + ) + with tm.assert_cow_warning(warn_copy_on_write): + s1.iloc[0] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(s1)) + tm.assert_frame_equal(result, expected) + else: + assert result.iloc[0, 0] == 10 + + +@pytest.mark.parametrize("dtype", [None, "int64"]) +def test_dataframe_from_dict_of_series_with_reindex(dtype): + # Case: constructing a DataFrame from Series objects with copy=False + # and passing an index that requires an actual (no-view) reindex -> need + # to ensure the result doesn't have refs set up to unnecessarily trigger + # a copy on write + s1 = Series([1, 2, 3]) + s2 = Series([4, 5, 6]) + df = DataFrame({"a": s1, "b": s2}, index=[1, 2, 3], dtype=dtype, copy=False) + + # df should own its memory, so mutating shouldn't trigger a copy + arr_before = get_array(df, "a") + assert not np.shares_memory(arr_before, get_array(s1)) + df.iloc[0, 0] = 100 + arr_after = get_array(df, "a") + assert np.shares_memory(arr_before, arr_after) + + +@pytest.mark.parametrize("cons", [Series, Index]) +@pytest.mark.parametrize( + "data, dtype", [([1, 2], None), ([1, 2], "int64"), (["a", "b"], None)] +) +def test_dataframe_from_series_or_index( + using_copy_on_write, warn_copy_on_write, data, dtype, cons +): + obj = cons(data, dtype=dtype) + obj_orig = obj.copy() + df = DataFrame(obj, dtype=dtype) + assert np.shares_memory(get_array(obj), get_array(df, 0)) + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = data[-1] + if using_copy_on_write: + tm.assert_equal(obj, obj_orig) + + +@pytest.mark.parametrize("cons", [Series, Index]) +def test_dataframe_from_series_or_index_different_dtype(using_copy_on_write, cons): + obj = cons([1, 2], dtype="int64") + df = DataFrame(obj, dtype="int32") + assert not np.shares_memory(get_array(obj), get_array(df, 0)) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +def test_dataframe_from_series_infer_datetime(using_copy_on_write): + ser = Series([Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype=object) + with tm.assert_produces_warning(FutureWarning, match="Dtype inference"): + df = DataFrame(ser) + assert not np.shares_memory(get_array(ser), get_array(df, 0)) + if 
using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("index", [None, [0, 1, 2]]) +def test_dataframe_from_dict_of_series_with_dtype(index): + # Variant of above, but now passing a dtype that causes a copy + # -> need to ensure the result doesn't have refs set up to unnecessarily + # trigger a copy on write + s1 = Series([1.0, 2.0, 3.0]) + s2 = Series([4, 5, 6]) + df = DataFrame({"a": s1, "b": s2}, index=index, dtype="int64", copy=False) + + # df should own its memory, so mutating shouldn't trigger a copy + arr_before = get_array(df, "a") + assert not np.shares_memory(arr_before, get_array(s1)) + df.iloc[0, 0] = 100 + arr_after = get_array(df, "a") + assert np.shares_memory(arr_before, arr_after) + + +@pytest.mark.parametrize("copy", [False, None, True]) +def test_frame_from_numpy_array(using_copy_on_write, copy, using_array_manager): + arr = np.array([[1, 2], [3, 4]]) + df = DataFrame(arr, copy=copy) + + if ( + using_copy_on_write + and copy is not False + or copy is True + or (using_array_manager and copy is None) + ): + assert not np.shares_memory(get_array(df, 0), arr) + else: + assert np.shares_memory(get_array(df, 0), arr) + + +def test_dataframe_from_records_with_dataframe(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + with tm.assert_produces_warning(FutureWarning): + df2 = DataFrame.from_records(df) + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + with tm.assert_cow_warning(warn_copy_on_write): + df2.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + tm.assert_frame_equal(df, df2) + + +def test_frame_from_dict_of_index(using_copy_on_write): + idx = Index([1, 2, 3]) + expected = idx.copy(deep=True) + df = DataFrame({"a": idx}, copy=False) + assert np.shares_memory(get_array(df, "a"), idx._values) + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + + df.iloc[0, 0] = 100 + tm.assert_index_equal(idx, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_internals.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_internals.py new file mode 100644 index 0000000000000000000000000000000000000000..a727331307d7e9086144aa8d27f70ffa83973620 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_internals.py @@ -0,0 +1,151 @@ +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +@td.skip_array_manager_invalid_test +def test_consolidate(using_copy_on_write): + # create unconsolidated DataFrame + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + df["c"] = [4, 5, 6] + + # take a viewing subset + subset = df[:] + + # each block of subset references a block of df + assert all(blk.refs.has_reference() for blk in subset._mgr.blocks) + + # consolidate the two int64 blocks + subset._consolidate_inplace() + + # the float64 block still references the parent one because it still a view + assert subset._mgr.blocks[0].refs.has_reference() + # equivalent of assert np.shares_memory(df["b"].values, subset["b"].values) + # but avoids caching df["b"] + assert np.shares_memory(get_array(df, "b"), get_array(subset, "b")) + + # the new consolidated int64 block does not reference another + assert not subset._mgr.blocks[1].refs.has_reference() + 
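+    # (editor's note: the consolidated int64 block above is freshly allocated
+    # and the integer data is copied into it during consolidation, which is
+    # why it can no longer be a view on the parent's blocks)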
+    # the parent dataframe is now also linked only for the float column
+    assert not df._mgr.blocks[0].refs.has_reference()
+    assert df._mgr.blocks[1].refs.has_reference()
+    assert not df._mgr.blocks[2].refs.has_reference()
+
+    # and modifying subset still doesn't modify parent
+    if using_copy_on_write:
+        subset.iloc[0, 1] = 0.0
+        assert not df._mgr.blocks[1].refs.has_reference()
+        assert df.loc[0, "b"] == 0.1
+
+
+@pytest.mark.single_cpu
+@td.skip_array_manager_invalid_test
+def test_switch_options():
+    # ensure we can switch the value of the option within one session
+    # (assuming data is constructed after switching)
+
+    # using the option_context to ensure we set back to global option value
+    # after running the test
+    with pd.option_context("mode.copy_on_write", False):
+        df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
+        subset = df[:]
+        subset.iloc[0, 0] = 0
+        # df updated with CoW disabled
+        assert df.iloc[0, 0] == 0
+
+        pd.options.mode.copy_on_write = True
+        df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
+        subset = df[:]
+        subset.iloc[0, 0] = 0
+        # df not updated with CoW enabled
+        assert df.iloc[0, 0] == 1
+
+        pd.options.mode.copy_on_write = False
+        df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
+        subset = df[:]
+        subset.iloc[0, 0] = 0
+        # df updated with CoW disabled
+        assert df.iloc[0, 0] == 0
+
+
+@td.skip_array_manager_invalid_test
+@pytest.mark.parametrize("dtype", [np.intp, np.int8])
+@pytest.mark.parametrize(
+    "locs, arr",
+    [
+        ([0], np.array([-1, -2, -3])),
+        ([1], np.array([-1, -2, -3])),
+        ([5], np.array([-1, -2, -3])),
+        ([0, 1], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
+        ([0, 2], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
+        ([0, 1, 2], np.array([[-1, -2, -3], [-4, -5, -6], [-4, -5, -6]]).T),
+        ([1, 2], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
+        ([1, 3], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
+        ([1, 3], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
+    ],
+)
+def test_iset_splits_blocks_inplace(using_copy_on_write, locs, arr, dtype):
+    # Nothing currently calls iset with more than 1 loc with inplace=True
+    # (that only happens with inplace=False), but ensure that it works
+    df = DataFrame(
+        {
+            "a": [1, 2, 3],
+            "b": [4, 5, 6],
+            "c": [7, 8, 9],
+            "d": [10, 11, 12],
+            "e": [13, 14, 15],
+            "f": ["a", "b", "c"],
+        },
+    )
+    arr = arr.astype(dtype)
+    df_orig = df.copy()
+    df2 = df.copy(deep=None)  # Trigger a CoW (if enabled, otherwise makes copy)
+    df2._mgr.iset(locs, arr, inplace=True)
+
+    tm.assert_frame_equal(df, df_orig)
+
+    if using_copy_on_write:
+        for i, col in enumerate(df.columns):
+            if i not in locs:
+                assert np.shares_memory(get_array(df, col), get_array(df2, col))
+    else:
+        for col in df.columns:
+            assert not np.shares_memory(get_array(df, col), get_array(df2, col))
+
+
+def test_exponential_backoff():
+    # GH#55518
+    df = DataFrame({"a": [1, 2, 3]})
+    for i in range(490):
+        df.copy(deep=False)
+
+    assert len(df._mgr.blocks[0].refs.referenced_blocks) == 491
+
+    df = DataFrame({"a": [1, 2, 3]})
+    dfs = [df.copy(deep=False) for i in range(510)]
+
+    for i in range(20):
+        df.copy(deep=False)
+    assert len(df._mgr.blocks[0].refs.referenced_blocks) == 531
+    assert df._mgr.blocks[0].refs.clear_counter == 1000
+
+    for i in range(500):
+        df.copy(deep=False)
+
+    # Don't reduce since we still have over 500 objects alive
+    assert df._mgr.blocks[0].refs.clear_counter == 1000
+
+    dfs = dfs[:300]
+    for i in range(500):
+        df.copy(deep=False)
+
+    # Reduce since there are fewer than 500 objects alive
+    assert df._mgr.blocks[0].refs.clear_counter == 500
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_interp_fillna.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_interp_fillna.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddc5879a56d544a5bbcbc9ef72b35d20d6f3b91b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_interp_fillna.py
@@ -0,0 +1,432 @@
+import numpy as np
+import pytest
+
+from pandas import (
+    NA,
+    ArrowDtype,
+    DataFrame,
+    Interval,
+    NaT,
+    Series,
+    Timestamp,
+    interval_range,
+    option_context,
+)
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+
+@pytest.mark.parametrize("method", ["pad", "nearest", "linear"])
+def test_interpolate_no_op(using_copy_on_write, method):
+    df = DataFrame({"a": [1, 2]})
+    df_orig = df.copy()
+
+    warn = None
+    if method == "pad":
+        warn = FutureWarning
+    msg = "DataFrame.interpolate with method=pad is deprecated"
+    with tm.assert_produces_warning(warn, match=msg):
+        result = df.interpolate(method=method)
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+
+    result.iloc[0, 0] = 100
+
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+    tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize("func", ["ffill", "bfill"])
+def test_interp_fill_functions(using_copy_on_write, func):
+    # Check that these take the same code paths as interpolate
+    df = DataFrame({"a": [1, 2]})
+    df_orig = df.copy()
+
+    result = getattr(df, func)()
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+
+    result.iloc[0, 0] = 100
+
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+    tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize("func", ["ffill", "bfill"])
+@pytest.mark.parametrize(
+    "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
+)
+def test_interpolate_triggers_copy(using_copy_on_write, vals, func):
+    df = DataFrame({"a": vals})
+    result = getattr(df, func)()
+
+    assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+    if using_copy_on_write:
+        # Check that we don't have references when triggering a copy
+        assert result._mgr._has_no_reference(0)
+
+
+@pytest.mark.parametrize(
+    "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
+)
+def test_interpolate_inplace_no_reference_no_copy(using_copy_on_write, vals):
+    df = DataFrame({"a": vals})
+    arr = get_array(df, "a")
+    df.interpolate(method="linear", inplace=True)
+
+    assert np.shares_memory(arr, get_array(df, "a"))
+    if using_copy_on_write:
+        # Check that we don't have references when triggering a copy
+        assert df._mgr._has_no_reference(0)
+
+
+@pytest.mark.parametrize(
+    "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
+)
+def test_interpolate_inplace_with_refs(using_copy_on_write, vals, warn_copy_on_write):
+    df = DataFrame({"a": [1, np.nan, 2]})
+    df_orig = df.copy()
+    arr = get_array(df, "a")
+    view = df[:]
+    with tm.assert_cow_warning(warn_copy_on_write):
+        df.interpolate(method="linear", inplace=True)
+
+    if using_copy_on_write:
+        # Check that copy was triggered in interpolate and that we don't
+        # have any references left
+        assert not np.shares_memory(arr, get_array(df, "a"))
+        tm.assert_frame_equal(df_orig, view)
+        assert df._mgr._has_no_reference(0)
+        assert view._mgr._has_no_reference(0)
+    else:
+        assert np.shares_memory(arr, get_array(df, "a"))
+
+
+@pytest.mark.parametrize("func", ["ffill", "bfill"])
+@pytest.mark.parametrize("dtype", ["float64", "Float64"])
+def test_interp_fill_functions_inplace(
+    using_copy_on_write, func, warn_copy_on_write, dtype
+):
+    # Check that these take the same code paths as interpolate
+    df = DataFrame({"a": [1, np.nan, 2]}, dtype=dtype)
+    df_orig = df.copy()
+    arr = get_array(df, "a")
+    view = df[:]
+
+    with tm.assert_cow_warning(warn_copy_on_write and dtype == "float64"):
+        getattr(df, func)(inplace=True)
+
+    if using_copy_on_write:
+        # Check that copy was triggered in interpolate and that we don't
+        # have any references left
+        assert not np.shares_memory(arr, get_array(df, "a"))
+        tm.assert_frame_equal(df_orig, view)
+        assert df._mgr._has_no_reference(0)
+        assert view._mgr._has_no_reference(0)
+    else:
+        assert np.shares_memory(arr, get_array(df, "a")) is (dtype == "float64")
+
+
+def test_interpolate_cleaned_fill_method(using_copy_on_write):
+    # Check that "method is set to None" case works correctly
+    df = DataFrame({"a": ["a", np.nan, "c"], "b": 1})
+    df_orig = df.copy()
+
+    msg = "DataFrame.interpolate with object dtype"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        result = df.interpolate(method="linear")
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+
+    result.iloc[0, 0] = Timestamp("2021-12-31")
+
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+    tm.assert_frame_equal(df, df_orig)
+
+
+def test_interpolate_object_convert_no_op(using_copy_on_write):
+    df = DataFrame({"a": ["a", "b", "c"], "b": 1})
+    arr_a = get_array(df, "a")
+    msg = "DataFrame.interpolate with method=pad is deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        df.interpolate(method="pad", inplace=True)
+
+    # Now CoW makes a copy, it should not!
+ if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert np.shares_memory(arr_a, get_array(df, "a")) + + +def test_interpolate_object_convert_copies(using_copy_on_write): + df = DataFrame({"a": Series([1, 2], dtype=object), "b": 1}) + arr_a = get_array(df, "a") + msg = "DataFrame.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.interpolate(method="pad", inplace=True) + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert not np.shares_memory(arr_a, get_array(df, "a")) + + +def test_interpolate_downcast(using_copy_on_write): + df = DataFrame({"a": [1, np.nan, 2.5], "b": 1}) + arr_a = get_array(df, "a") + msg = "DataFrame.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.interpolate(method="pad", inplace=True, downcast="infer") + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert np.shares_memory(arr_a, get_array(df, "a")) + + +def test_interpolate_downcast_reference_triggers_copy(using_copy_on_write): + df = DataFrame({"a": [1, np.nan, 2.5], "b": 1}) + df_orig = df.copy() + arr_a = get_array(df, "a") + view = df[:] + msg = "DataFrame.interpolate with method=pad is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.interpolate(method="pad", inplace=True, downcast="infer") + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert not np.shares_memory(arr_a, get_array(df, "a")) + tm.assert_frame_equal(df_orig, view) + else: + tm.assert_frame_equal(df, view) + + +def test_fillna(using_copy_on_write): + df = DataFrame({"a": [1.5, np.nan], "b": 1}) + df_orig = df.copy() + + df2 = df.fillna(5.5) + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + df2.iloc[0, 1] = 100 + tm.assert_frame_equal(df_orig, df) + + +def test_fillna_dict(using_copy_on_write): + df = DataFrame({"a": [1.5, np.nan], "b": 1}) + df_orig = df.copy() + + df2 = df.fillna({"a": 100.5}) + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + df2.iloc[0, 1] = 100 + tm.assert_frame_equal(df_orig, df) + + +@pytest.mark.parametrize("downcast", [None, False]) +def test_fillna_inplace(using_copy_on_write, downcast): + df = DataFrame({"a": [1.5, np.nan], "b": 1}) + arr_a = get_array(df, "a") + arr_b = get_array(df, "b") + + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df.fillna(5.5, inplace=True, downcast=downcast) + assert np.shares_memory(get_array(df, "a"), arr_a) + assert np.shares_memory(get_array(df, "b"), arr_b) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert df._mgr._has_no_reference(1) + + +def test_fillna_inplace_reference(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1.5, np.nan], "b": 1}) + df_orig = df.copy() + arr_a = get_array(df, "a") + arr_b = get_array(df, "b") + view = df[:] + + with tm.assert_cow_warning(warn_copy_on_write): + df.fillna(5.5, inplace=True) + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr_a) + assert np.shares_memory(get_array(df, "b"), arr_b) + assert view._mgr._has_no_reference(0) + assert df._mgr._has_no_reference(0) + 
tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "a"), arr_a) + assert np.shares_memory(get_array(df, "b"), arr_b) + expected = DataFrame({"a": [1.5, 5.5], "b": 1}) + tm.assert_frame_equal(df, expected) + + +def test_fillna_interval_inplace_reference(using_copy_on_write, warn_copy_on_write): + # Set dtype explicitly to avoid implicit cast when setting nan + ser = Series( + interval_range(start=0, end=5), name="a", dtype="interval[float64, right]" + ) + ser.iloc[1] = np.nan + + ser_orig = ser.copy() + view = ser[:] + with tm.assert_cow_warning(warn_copy_on_write): + ser.fillna(value=Interval(left=0, right=5), inplace=True) + + if using_copy_on_write: + assert not np.shares_memory( + get_array(ser, "a").left.values, get_array(view, "a").left.values + ) + tm.assert_series_equal(view, ser_orig) + else: + assert np.shares_memory( + get_array(ser, "a").left.values, get_array(view, "a").left.values + ) + + +def test_fillna_series_empty_arg(using_copy_on_write): + ser = Series([1, np.nan, 2]) + ser_orig = ser.copy() + result = ser.fillna({}) + + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(result)) + else: + assert not np.shares_memory(get_array(ser), get_array(result)) + + ser.iloc[0] = 100.5 + tm.assert_series_equal(ser_orig, result) + + +def test_fillna_series_empty_arg_inplace(using_copy_on_write): + ser = Series([1, np.nan, 2]) + arr = get_array(ser) + ser.fillna({}, inplace=True) + + assert np.shares_memory(get_array(ser), arr) + if using_copy_on_write: + assert ser._mgr._has_no_reference(0) + + +def test_fillna_ea_noop_shares_memory( + using_copy_on_write, any_numeric_ea_and_arrow_dtype +): + df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype) + df_orig = df.copy() + df2 = df.fillna(100) + + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not df2._mgr._has_no_reference(1) + elif isinstance(df.dtypes.iloc[0], ArrowDtype): + # arrow is immutable, so no-ops do not need to copy underlying array + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + tm.assert_frame_equal(df_orig, df) + + df2.iloc[0, 1] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert df2._mgr._has_no_reference(1) + assert df._mgr._has_no_reference(1) + tm.assert_frame_equal(df_orig, df) + + +def test_fillna_inplace_ea_noop_shares_memory( + using_copy_on_write, warn_copy_on_write, any_numeric_ea_and_arrow_dtype +): + df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype) + df_orig = df.copy() + view = df[:] + with tm.assert_cow_warning(warn_copy_on_write): + df.fillna(100, inplace=True) + + if isinstance(df["a"].dtype, ArrowDtype) or using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), get_array(view, "a")) + else: + # MaskedArray can actually respect inplace=True + assert np.shares_memory(get_array(df, "a"), get_array(view, "a")) + + assert np.shares_memory(get_array(df, "b"), get_array(view, "b")) + if using_copy_on_write: + assert not df._mgr._has_no_reference(1) + assert not view._mgr._has_no_reference(1) + + with tm.assert_cow_warning( + warn_copy_on_write and "pyarrow" not in any_numeric_ea_and_arrow_dtype + ): + df.iloc[0, 1] = 100 + if isinstance(df["a"].dtype, ArrowDtype) or using_copy_on_write: + 
tm.assert_frame_equal(df_orig, view) + else: + # we actually have a view + tm.assert_frame_equal(df, view) + + +def test_fillna_chained_assignment(using_copy_on_write): + df = DataFrame({"a": [1, np.nan, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].fillna(100, inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + df[["a"]].fillna(100, inplace=True) + tm.assert_frame_equal(df, df_orig) + else: + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + df[["a"]].fillna(100, inplace=True) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + df[df.a > 5].fillna(100, inplace=True) + + with tm.assert_produces_warning(FutureWarning, match="inplace method"): + df["a"].fillna(100, inplace=True) + + +@pytest.mark.parametrize("func", ["interpolate", "ffill", "bfill"]) +def test_interpolate_chained_assignment(using_copy_on_write, func): + df = DataFrame({"a": [1, np.nan, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + getattr(df["a"], func)(inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + getattr(df[["a"]], func)(inplace=True) + tm.assert_frame_equal(df, df_orig) + else: + with tm.assert_produces_warning(FutureWarning, match="inplace method"): + getattr(df["a"], func)(inplace=True) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + getattr(df[["a"]], func)(inplace=True) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + getattr(df[df["a"] > 1], func)(inplace=True) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cab6f82efdaa147fb205e1bf65948b72799d5637 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_datetime_index.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_datetime_index.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dabf81e4bea206a3d671030aa1460d0c4af1ac71 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_datetime_index.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_resample_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_resample_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2037bd49bdd58275f1a02e434cf2a0c12e535790 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_resample_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_resampler_grouper.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_resampler_grouper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c5f6ddab8a5752b5c9894bce4262b3f1d4680ff Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_resampler_grouper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_time_grouper.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_time_grouper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..affe2cfceaaabbeba8ebdb557a4ea691270ce825 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/resample/__pycache__/test_time_grouper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/util/__init__.py b/venv/lib/python3.10/site-packages/pandas/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..82b3aa56c653cd1241872c67e9d9016df04a6c5a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/util/__init__.py @@ -0,0 +1,29 @@ +def __getattr__(key: str): + # These imports need to be lazy to avoid circular import errors + if key == "hash_array": + from pandas.core.util.hashing import hash_array + + return hash_array + if key == "hash_pandas_object": + from pandas.core.util.hashing import hash_pandas_object + + return hash_pandas_object + if key == "Appender": + from pandas.util._decorators import Appender + + return Appender + if key == "Substitution": + from pandas.util._decorators import Substitution + + return Substitution + + if key == "cache_readonly": + from pandas.util._decorators import cache_readonly + + return cache_readonly + + raise AttributeError(f"module 'pandas.util' has no attribute '{key}'") + + +def capitalize_first_letter(s): + return s[:1].upper() + s[1:] diff --git a/venv/lib/python3.10/site-packages/pandas/util/__pycache__/_exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/util/__pycache__/_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efdc8fd83ed570fdc939fcb22b6a7c2e0410b0ad Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/util/__pycache__/_exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/util/__pycache__/_print_versions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/util/__pycache__/_print_versions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32f3f73e5a20293750e97e401ab5860fe1c7841f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/util/__pycache__/_print_versions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/util/__pycache__/_validators.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/util/__pycache__/_validators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdda216ec43c05e60be9e1ff5183c012b7f8831a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/util/__pycache__/_validators.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/util/_decorators.py b/venv/lib/python3.10/site-packages/pandas/util/_decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..4e8189e72c427d75da0367532edfe04d0a7581e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/util/_decorators.py @@ -0,0 +1,508 @@ +from __future__ import annotations + +from functools import wraps +import inspect +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, +) +import warnings + +from pandas._libs.properties import cache_readonly +from 
pandas._typing import ( + F, + T, +) +from pandas.util._exceptions import find_stack_level + +if TYPE_CHECKING: + from collections.abc import Mapping + + +def deprecate( + name: str, + alternative: Callable[..., Any], + version: str, + alt_name: str | None = None, + klass: type[Warning] | None = None, + stacklevel: int = 2, + msg: str | None = None, +) -> Callable[[F], F]: + """ + Return a new function that emits a deprecation warning on use. + + To use this method for a deprecated function, another function + `alternative` with the same signature must exist. The deprecated + function will emit a deprecation warning, and in the docstring + it will contain the deprecation directive with the provided version + so it can be detected for future removal. + + Parameters + ---------- + name : str + Name of function to deprecate. + alternative : func + Function to use instead. + version : str + Version of pandas in which the method has been deprecated. + alt_name : str, optional + Name to use in preference of alternative.__name__. + klass : Warning, default FutureWarning + stacklevel : int, default 2 + msg : str + The message to display in the warning. + Default is '{name} is deprecated. Use {alt_name} instead.' + """ + alt_name = alt_name or alternative.__name__ + klass = klass or FutureWarning + warning_msg = msg or f"{name} is deprecated, use {alt_name} instead." + + @wraps(alternative) + def wrapper(*args, **kwargs) -> Callable[..., Any]: + warnings.warn(warning_msg, klass, stacklevel=stacklevel) + return alternative(*args, **kwargs) + + # adding deprecated directive to the docstring + msg = msg or f"Use `{alt_name}` instead." + doc_error_msg = ( + "deprecate needs a correctly formatted docstring in " + "the target function (should have a one liner short " + "summary, and opening quotes should be in their own " + f"line). Found:\n{alternative.__doc__}" + ) + + # when python is running in optimized mode (i.e. `-OO`), docstrings are + # removed, so we check that a docstring with correct formatting is used + # but we allow empty docstrings + if alternative.__doc__: + if alternative.__doc__.count("\n") < 3: + raise AssertionError(doc_error_msg) + empty1, summary, empty2, doc_string = alternative.__doc__.split("\n", 3) + if empty1 or empty2 and not summary: + raise AssertionError(doc_error_msg) + wrapper.__doc__ = dedent( + f""" + {summary.strip()} + + .. deprecated:: {version} + {msg} + + {dedent(doc_string)}""" + ) + # error: Incompatible return value type (got "Callable[[VarArg(Any), KwArg(Any)], + # Callable[...,Any]]", expected "Callable[[F], F]") + return wrapper # type: ignore[return-value] + + +def deprecate_kwarg( + old_arg_name: str, + new_arg_name: str | None, + mapping: Mapping[Any, Any] | Callable[[Any], Any] | None = None, + stacklevel: int = 2, +) -> Callable[[F], F]: + """ + Decorator to deprecate a keyword argument of a function. + + Parameters + ---------- + old_arg_name : str + Name of argument in function to deprecate + new_arg_name : str or None + Name of preferred argument in function. Use None to raise warning that + ``old_arg_name`` keyword is deprecated. + mapping : dict or callable + If mapping is present, use it to translate old arguments to + new arguments. A callable must do its own value checking; + values not found in a dict will be forwarded unchanged. + + Examples + -------- + The following deprecates 'cols', using 'columns' instead + + >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns') + ... def f(columns=''): + ... print(columns) + ... 
+ >>> f(columns='should work ok') + should work ok + + >>> f(cols='should raise warning') # doctest: +SKIP + FutureWarning: cols is deprecated, use columns instead + warnings.warn(msg, FutureWarning) + should raise warning + + >>> f(cols='should error', columns="can\'t pass do both") # doctest: +SKIP + TypeError: Can only specify 'cols' or 'columns', not both + + >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False}) + ... def f(new=False): + ... print('yes!' if new else 'no!') + ... + >>> f(old='yes') # doctest: +SKIP + FutureWarning: old='yes' is deprecated, use new=True instead + warnings.warn(msg, FutureWarning) + yes! + + To raise a warning that a keyword will be removed entirely in the future + + >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None) + ... def f(cols='', another_param=''): + ... print(cols) + ... + >>> f(cols='should raise warning') # doctest: +SKIP + FutureWarning: the 'cols' keyword is deprecated and will be removed in a + future version please takes steps to stop use of 'cols' + should raise warning + >>> f(another_param='should not raise warning') # doctest: +SKIP + should not raise warning + + >>> f(cols='should raise warning', another_param='') # doctest: +SKIP + FutureWarning: the 'cols' keyword is deprecated and will be removed in a + future version please takes steps to stop use of 'cols' + should raise warning + """ + if mapping is not None and not hasattr(mapping, "get") and not callable(mapping): + raise TypeError( + "mapping from old to new argument values must be dict or callable!" + ) + + def _deprecate_kwarg(func: F) -> F: + @wraps(func) + def wrapper(*args, **kwargs) -> Callable[..., Any]: + old_arg_value = kwargs.pop(old_arg_name, None) + + if old_arg_value is not None: + if new_arg_name is None: + msg = ( + f"the {repr(old_arg_name)} keyword is deprecated and " + "will be removed in a future version. Please take " + f"steps to stop the use of {repr(old_arg_name)}" + ) + warnings.warn(msg, FutureWarning, stacklevel=stacklevel) + kwargs[old_arg_name] = old_arg_value + return func(*args, **kwargs) + + elif mapping is not None: + if callable(mapping): + new_arg_value = mapping(old_arg_value) + else: + new_arg_value = mapping.get(old_arg_value, old_arg_value) + msg = ( + f"the {old_arg_name}={repr(old_arg_value)} keyword is " + "deprecated, use " + f"{new_arg_name}={repr(new_arg_value)} instead." + ) + else: + new_arg_value = old_arg_value + msg = ( + f"the {repr(old_arg_name)} keyword is deprecated, " + f"use {repr(new_arg_name)} instead." + ) + + warnings.warn(msg, FutureWarning, stacklevel=stacklevel) + if kwargs.get(new_arg_name) is not None: + msg = ( + f"Can only specify {repr(old_arg_name)} " + f"or {repr(new_arg_name)}, not both." + ) + raise TypeError(msg) + kwargs[new_arg_name] = new_arg_value + return func(*args, **kwargs) + + return cast(F, wrapper) + + return _deprecate_kwarg + + +def _format_argument_list(allow_args: list[str]) -> str: + """ + Convert the allow_args argument (either string or integer) of + `deprecate_nonkeyword_arguments` function to a string describing + it to be inserted into warning message. + + Parameters + ---------- + allowed_args : list, tuple or int + The `allowed_args` argument for `deprecate_nonkeyword_arguments`, + but None value is not allowed. + + Returns + ------- + str + The substring describing the argument list in best way to be + inserted to the warning message. 
+ + Examples + -------- + `format_argument_list([])` -> '' + `format_argument_list(['a'])` -> "except for the arguments 'a'" + `format_argument_list(['a', 'b'])` -> "except for the arguments 'a' and 'b'" + `format_argument_list(['a', 'b', 'c'])` -> + "except for the arguments 'a', 'b' and 'c'" + """ + if "self" in allow_args: + allow_args.remove("self") + if not allow_args: + return "" + elif len(allow_args) == 1: + return f" except for the argument '{allow_args[0]}'" + else: + last = allow_args[-1] + args = ", ".join(["'" + x + "'" for x in allow_args[:-1]]) + return f" except for the arguments {args} and '{last}'" + + +def future_version_msg(version: str | None) -> str: + """Specify which version of pandas the deprecation will take place in.""" + if version is None: + return "In a future version of pandas" + else: + return f"Starting with pandas version {version}" + + +def deprecate_nonkeyword_arguments( + version: str | None, + allowed_args: list[str] | None = None, + name: str | None = None, +) -> Callable[[F], F]: + """ + Decorator to deprecate a use of non-keyword arguments of a function. + + Parameters + ---------- + version : str, optional + The version in which positional arguments will become + keyword-only. If None, then the warning message won't + specify any particular version. + + allowed_args : list, optional + In case of list, it must be the list of names of some + first arguments of the decorated functions that are + OK to be given as positional arguments. In case of None value, + defaults to list of all arguments not having the + default value. + + name : str, optional + The specific name of the function to show in the warning + message. If None, then the Qualified name of the function + is used. + """ + + def decorate(func): + old_sig = inspect.signature(func) + + if allowed_args is not None: + allow_args = allowed_args + else: + allow_args = [ + p.name + for p in old_sig.parameters.values() + if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) + and p.default is p.empty + ] + + new_params = [ + p.replace(kind=p.KEYWORD_ONLY) + if ( + p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) + and p.name not in allow_args + ) + else p + for p in old_sig.parameters.values() + ] + new_params.sort(key=lambda p: p.kind) + new_sig = old_sig.replace(parameters=new_params) + + num_allow_args = len(allow_args) + msg = ( + f"{future_version_msg(version)} all arguments of " + f"{name or func.__qualname__}{{arguments}} will be keyword-only." + ) + + @wraps(func) + def wrapper(*args, **kwargs): + if len(args) > num_allow_args: + warnings.warn( + msg.format(arguments=_format_argument_list(allow_args)), + FutureWarning, + stacklevel=find_stack_level(), + ) + return func(*args, **kwargs) + + # error: "Callable[[VarArg(Any), KwArg(Any)], Any]" has no + # attribute "__signature__" + wrapper.__signature__ = new_sig # type: ignore[attr-defined] + return wrapper + + return decorate + + +def doc(*docstrings: None | str | Callable, **params) -> Callable[[F], F]: + """ + A decorator to take docstring templates, concatenate them and perform string + substitution on them. + + This decorator will add a variable "_docstring_components" to the wrapped + callable to keep track the original docstring template for potential usage. + If it should be consider as a template, it will be saved as a string. + Otherwise, it will be saved as callable, and later user __doc__ and dedent + to get docstring. 
+ + Parameters + ---------- + *docstrings : None, str, or callable + The string / docstring / docstring template to be appended in order + after default docstring under callable. + **params + The string which would be used to format docstring template. + """ + + def decorator(decorated: F) -> F: + # collecting docstring and docstring templates + docstring_components: list[str | Callable] = [] + if decorated.__doc__: + docstring_components.append(dedent(decorated.__doc__)) + + for docstring in docstrings: + if docstring is None: + continue + if hasattr(docstring, "_docstring_components"): + docstring_components.extend( + docstring._docstring_components # pyright: ignore[reportGeneralTypeIssues] + ) + elif isinstance(docstring, str) or docstring.__doc__: + docstring_components.append(docstring) + + params_applied = [ + component.format(**params) + if isinstance(component, str) and len(params) > 0 + else component + for component in docstring_components + ] + + decorated.__doc__ = "".join( + [ + component + if isinstance(component, str) + else dedent(component.__doc__ or "") + for component in params_applied + ] + ) + + # error: "F" has no attribute "_docstring_components" + decorated._docstring_components = ( # type: ignore[attr-defined] + docstring_components + ) + return decorated + + return decorator + + +# Substitution and Appender are derived from matplotlib.docstring (1.1.0) +# module https://matplotlib.org/users/license.html + + +class Substitution: + """ + A decorator to take a function's docstring and perform string + substitution on it. + + This decorator should be robust even if func.__doc__ is None + (for example, if -OO was passed to the interpreter) + + Usage: construct a docstring.Substitution with a sequence or + dictionary suitable for performing substitution; then + decorate a suitable function with the constructed object. e.g. + + sub_author_name = Substitution(author='Jason') + + @sub_author_name + def some_function(x): + "%(author)s wrote this function" + + # note that some_function.__doc__ is now "Jason wrote this function" + + One can also use positional arguments. + + sub_first_last_names = Substitution('Edgar Allen', 'Poe') + + @sub_first_last_names + def some_function(x): + "%s %s wrote the Raven" + """ + + def __init__(self, *args, **kwargs) -> None: + if args and kwargs: + raise AssertionError("Only positional or keyword args are allowed") + + self.params = args or kwargs + + def __call__(self, func: F) -> F: + func.__doc__ = func.__doc__ and func.__doc__ % self.params + return func + + def update(self, *args, **kwargs) -> None: + """ + Update self.params with supplied args. + """ + if isinstance(self.params, dict): + self.params.update(*args, **kwargs) + + +class Appender: + """ + A function decorator that will append an addendum to the docstring + of the target function. + + This decorator should be robust even if func.__doc__ is None + (for example, if -OO was passed to the interpreter). + + Usage: construct a docstring.Appender with a string to be joined to + the original docstring. An optional 'join' parameter may be supplied + which will be used to join the docstring and addendum. e.g. 
+ + add_copyright = Appender("Copyright (c) 2009", join='\n') + + @add_copyright + def my_dog(has='fleas'): + "This docstring will have a copyright below" + pass + """ + + addendum: str | None + + def __init__(self, addendum: str | None, join: str = "", indents: int = 0) -> None: + if indents > 0: + self.addendum = indent(addendum, indents=indents) + else: + self.addendum = addendum + self.join = join + + def __call__(self, func: T) -> T: + func.__doc__ = func.__doc__ if func.__doc__ else "" + self.addendum = self.addendum if self.addendum else "" + docitems = [func.__doc__, self.addendum] + func.__doc__ = dedent(self.join.join(docitems)) + return func + + +def indent(text: str | None, indents: int = 1) -> str: + if not text or not isinstance(text, str): + return "" + jointext = "".join(["\n"] + [" "] * indents) + return jointext.join(text.split("\n")) + + +__all__ = [ + "Appender", + "cache_readonly", + "deprecate", + "deprecate_kwarg", + "deprecate_nonkeyword_arguments", + "doc", + "future_version_msg", + "Substitution", +] diff --git a/venv/lib/python3.10/site-packages/pandas/util/_doctools.py b/venv/lib/python3.10/site-packages/pandas/util/_doctools.py new file mode 100644 index 0000000000000000000000000000000000000000..12619abf4baaf336dfd3d5ae78a9bc2133f310c0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/util/_doctools.py @@ -0,0 +1,202 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +import pandas as pd + +if TYPE_CHECKING: + from collections.abc import Iterable + + +class TablePlotter: + """ + Layout some DataFrames in vertical/horizontal layout for explanation. + Used in merging.rst + """ + + def __init__( + self, + cell_width: float = 0.37, + cell_height: float = 0.25, + font_size: float = 7.5, + ) -> None: + self.cell_width = cell_width + self.cell_height = cell_height + self.font_size = font_size + + def _shape(self, df: pd.DataFrame) -> tuple[int, int]: + """ + Calculate table shape considering index levels. + """ + row, col = df.shape + return row + df.columns.nlevels, col + df.index.nlevels + + def _get_cells(self, left, right, vertical) -> tuple[int, int]: + """ + Calculate appropriate figure size based on left and right data. + """ + if vertical: + # calculate required number of cells + vcells = max(sum(self._shape(df)[0] for df in left), self._shape(right)[0]) + hcells = max(self._shape(df)[1] for df in left) + self._shape(right)[1] + else: + vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]]) + hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]]) + return hcells, vcells + + def plot(self, left, right, labels: Iterable[str] = (), vertical: bool = True): + """ + Plot left / right DataFrames in specified layout. + + Parameters + ---------- + left : list of DataFrames before operation is applied + right : DataFrame of operation result + labels : list of str to be drawn as titles of left DataFrames + vertical : bool, default True + If True, use vertical layout. If False, use horizontal layout. 
+ """ + from matplotlib import gridspec + import matplotlib.pyplot as plt + + if not isinstance(left, list): + left = [left] + left = [self._conv(df) for df in left] + right = self._conv(right) + + hcells, vcells = self._get_cells(left, right, vertical) + + if vertical: + figsize = self.cell_width * hcells, self.cell_height * vcells + else: + # include margin for titles + figsize = self.cell_width * hcells, self.cell_height * vcells + fig = plt.figure(figsize=figsize) + + if vertical: + gs = gridspec.GridSpec(len(left), hcells) + # left + max_left_cols = max(self._shape(df)[1] for df in left) + max_left_rows = max(self._shape(df)[0] for df in left) + for i, (_left, _label) in enumerate(zip(left, labels)): + ax = fig.add_subplot(gs[i, 0:max_left_cols]) + self._make_table(ax, _left, title=_label, height=1.0 / max_left_rows) + # right + ax = plt.subplot(gs[:, max_left_cols:]) + self._make_table(ax, right, title="Result", height=1.05 / vcells) + fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95) + else: + max_rows = max(self._shape(df)[0] for df in left + [right]) + height = 1.0 / np.max(max_rows) + gs = gridspec.GridSpec(1, hcells) + # left + i = 0 + for df, _label in zip(left, labels): + sp = self._shape(df) + ax = fig.add_subplot(gs[0, i : i + sp[1]]) + self._make_table(ax, df, title=_label, height=height) + i += sp[1] + # right + ax = plt.subplot(gs[0, i:]) + self._make_table(ax, right, title="Result", height=height) + fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95) + + return fig + + def _conv(self, data): + """ + Convert each input to appropriate for table outplot. + """ + if isinstance(data, pd.Series): + if data.name is None: + data = data.to_frame(name="") + else: + data = data.to_frame() + data = data.fillna("NaN") + return data + + def _insert_index(self, data): + # insert is destructive + data = data.copy() + idx_nlevels = data.index.nlevels + if idx_nlevels == 1: + data.insert(0, "Index", data.index) + else: + for i in range(idx_nlevels): + data.insert(i, f"Index{i}", data.index._get_level_values(i)) + + col_nlevels = data.columns.nlevels + if col_nlevels > 1: + col = data.columns._get_level_values(0) + values = [ + data.columns._get_level_values(i)._values for i in range(1, col_nlevels) + ] + col_df = pd.DataFrame(values) + data.columns = col_df.columns + data = pd.concat([col_df, data]) + data.columns = col + return data + + def _make_table(self, ax, df, title: str, height: float | None = None) -> None: + if df is None: + ax.set_visible(False) + return + + from pandas import plotting + + idx_nlevels = df.index.nlevels + col_nlevels = df.columns.nlevels + # must be convert here to get index levels for colorization + df = self._insert_index(df) + tb = plotting.table(ax, df, loc=9) + tb.set_fontsize(self.font_size) + + if height is None: + height = 1.0 / (len(df) + 1) + + props = tb.properties() + for (r, c), cell in props["celld"].items(): + if c == -1: + cell.set_visible(False) + elif r < col_nlevels and c < idx_nlevels: + cell.set_visible(False) + elif r < col_nlevels or c < idx_nlevels: + cell.set_facecolor("#AAAAAA") + cell.set_height(height) + + ax.set_title(title, size=self.font_size) + ax.axis("off") + + +def main() -> None: + import matplotlib.pyplot as plt + + p = TablePlotter() + + df1 = pd.DataFrame({"A": [10, 11, 12], "B": [20, 21, 22], "C": [30, 31, 32]}) + df2 = pd.DataFrame({"A": [10, 12], "C": [30, 32]}) + + p.plot([df1, df2], pd.concat([df1, df2]), labels=["df1", "df2"], vertical=True) + plt.show() + + df3 = pd.DataFrame({"X": [10, 
12], "Z": [30, 32]}) + + p.plot( + [df1, df3], pd.concat([df1, df3], axis=1), labels=["df1", "df2"], vertical=False + ) + plt.show() + + idx = pd.MultiIndex.from_tuples( + [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")] + ) + column = pd.MultiIndex.from_tuples([(1, "A"), (1, "B")]) + df3 = pd.DataFrame({"v1": [1, 2, 3, 4, 5, 6], "v2": [5, 6, 7, 8, 9, 10]}, index=idx) + df3.columns = column + p.plot(df3, df3, labels=["df3"]) + plt.show() + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/pandas/util/_exceptions.py b/venv/lib/python3.10/site-packages/pandas/util/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..5f50838d373154868ff7414775763a1c66853c65 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/util/_exceptions.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +import contextlib +import inspect +import os +import re +from typing import TYPE_CHECKING +import warnings + +if TYPE_CHECKING: + from collections.abc import Generator + from types import FrameType + + +@contextlib.contextmanager +def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]: + """ + Rewrite the message of an exception. + """ + try: + yield + except Exception as err: + if not err.args: + raise + msg = str(err.args[0]) + msg = msg.replace(old_name, new_name) + args: tuple[str, ...] = (msg,) + if len(err.args) > 1: + args = args + err.args[1:] + err.args = args + raise + + +def find_stack_level() -> int: + """ + Find the first place in the stack that is not inside pandas + (tests notwithstanding). + """ + + import pandas as pd + + pkg_dir = os.path.dirname(pd.__file__) + test_dir = os.path.join(pkg_dir, "tests") + + # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow + frame: FrameType | None = inspect.currentframe() + try: + n = 0 + while frame: + filename = inspect.getfile(frame) + if filename.startswith(pkg_dir) and not filename.startswith(test_dir): + frame = frame.f_back + n += 1 + else: + break + finally: + # See note in + # https://docs.python.org/3/library/inspect.html#inspect.Traceback + del frame + return n + + +@contextlib.contextmanager +def rewrite_warning( + target_message: str, + target_category: type[Warning], + new_message: str, + new_category: type[Warning] | None = None, +) -> Generator[None, None, None]: + """ + Rewrite the message of a warning. + + Parameters + ---------- + target_message : str + Warning message to match. + target_category : Warning + Warning type to match. + new_message : str + New warning message to emit. + new_category : Warning or None, default None + New warning type to emit. When None, will be the same as target_category. 
+ """ + if new_category is None: + new_category = target_category + with warnings.catch_warnings(record=True) as record: + yield + if len(record) > 0: + match = re.compile(target_message) + for warning in record: + if warning.category is target_category and re.search( + match, str(warning.message) + ): + category = new_category + message: Warning | str = new_message + else: + category, message = warning.category, warning.message + warnings.warn_explicit( + message=message, + category=category, + filename=warning.filename, + lineno=warning.lineno, + ) diff --git a/venv/lib/python3.10/site-packages/pandas/util/_print_versions.py b/venv/lib/python3.10/site-packages/pandas/util/_print_versions.py new file mode 100644 index 0000000000000000000000000000000000000000..e39c2f7badb1d1b6d513d07f328340b9d796fff2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/util/_print_versions.py @@ -0,0 +1,166 @@ +from __future__ import annotations + +import codecs +import json +import locale +import os +import platform +import struct +import sys +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from pandas._typing import JSONSerializable + +from pandas.compat._optional import ( + VERSIONS, + get_version, + import_optional_dependency, +) + + +def _get_commit_hash() -> str | None: + """ + Use vendored versioneer code to get git hash, which handles + git worktree correctly. + """ + try: + from pandas._version_meson import ( # pyright: ignore [reportMissingImports] + __git_version__, + ) + + return __git_version__ + except ImportError: + from pandas._version import get_versions + + versions = get_versions() + return versions["full-revisionid"] + + +def _get_sys_info() -> dict[str, JSONSerializable]: + """ + Returns system information as a JSON serializable dictionary. + """ + uname_result = platform.uname() + language_code, encoding = locale.getlocale() + return { + "commit": _get_commit_hash(), + "python": ".".join([str(i) for i in sys.version_info]), + "python-bits": struct.calcsize("P") * 8, + "OS": uname_result.system, + "OS-release": uname_result.release, + "Version": uname_result.version, + "machine": uname_result.machine, + "processor": uname_result.processor, + "byteorder": sys.byteorder, + "LC_ALL": os.environ.get("LC_ALL"), + "LANG": os.environ.get("LANG"), + "LOCALE": {"language-code": language_code, "encoding": encoding}, + } + + +def _get_dependency_info() -> dict[str, JSONSerializable]: + """ + Returns dependency information as a JSON serializable dictionary. + """ + deps = [ + "pandas", + # required + "numpy", + "pytz", + "dateutil", + # install / build, + "setuptools", + "pip", + "Cython", + # test + "pytest", + "hypothesis", + # docs + "sphinx", + # Other, need a min version + "blosc", + "feather", + "xlsxwriter", + "lxml.etree", + "html5lib", + "pymysql", + "psycopg2", + "jinja2", + # Other, not imported. + "IPython", + "pandas_datareader", + ] + deps.extend(list(VERSIONS)) + + result: dict[str, JSONSerializable] = {} + for modname in deps: + mod = import_optional_dependency(modname, errors="ignore") + result[modname] = get_version(mod) if mod else None + return result + + +def show_versions(as_json: str | bool = False) -> None: + """ + Provide useful information, important for bug reports. + + It comprises info about hosting operation system, pandas version, + and versions of other installed relative packages. + + Parameters + ---------- + as_json : str or bool, default False + * If False, outputs info in a human readable form to the console. 
+ * If str, it will be considered as a path to a file. + Info will be written to that file in JSON format. + * If True, outputs info in JSON format to the console. + + Examples + -------- + >>> pd.show_versions() # doctest: +SKIP + Your output may look something like this: + INSTALLED VERSIONS + ------------------ + commit : 37ea63d540fd27274cad6585082c91b1283f963d + python : 3.10.6.final.0 + python-bits : 64 + OS : Linux + OS-release : 5.10.102.1-microsoft-standard-WSL2 + Version : #1 SMP Wed Mar 2 00:30:59 UTC 2022 + machine : x86_64 + processor : x86_64 + byteorder : little + LC_ALL : None + LANG : en_GB.UTF-8 + LOCALE : en_GB.UTF-8 + pandas : 2.0.1 + numpy : 1.24.3 + ... + """ + sys_info = _get_sys_info() + deps = _get_dependency_info() + + if as_json: + j = {"system": sys_info, "dependencies": deps} + + if as_json is True: + sys.stdout.writelines(json.dumps(j, indent=2)) + else: + assert isinstance(as_json, str) # needed for mypy + with codecs.open(as_json, "wb", encoding="utf8") as f: + json.dump(j, f, indent=2) + + else: + assert isinstance(sys_info["LOCALE"], dict) # needed for mypy + language_code = sys_info["LOCALE"]["language-code"] + encoding = sys_info["LOCALE"]["encoding"] + sys_info["LOCALE"] = f"{language_code}.{encoding}" + + maxlen = max(len(x) for x in deps) + print("\nINSTALLED VERSIONS") + print("------------------") + for k, v in sys_info.items(): + print(f"{k:<{maxlen}}: {v}") + print("") + for k, v in deps.items(): + print(f"{k:<{maxlen}}: {v}") diff --git a/venv/lib/python3.10/site-packages/pandas/util/_test_decorators.py b/venv/lib/python3.10/site-packages/pandas/util/_test_decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1912bce856dd2694447d820ea2c5124be9c1a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/util/_test_decorators.py @@ -0,0 +1,173 @@ +""" +This module provides decorator functions which can be applied to test objects +in order to skip those objects when certain conditions occur. A sample use case +is to detect if the platform is missing ``matplotlib``. If so, any test objects +which require ``matplotlib`` and decorated with ``@td.skip_if_no("matplotlib")`` +will be skipped by ``pytest`` during the execution of the test suite. + +To illustrate, after importing this module: + +import pandas.util._test_decorators as td + +The decorators can be applied to classes: + +@td.skip_if_no("package") +class Foo: + ... + +Or individual functions: + +@td.skip_if_no("package") +def test_foo(): + ... + +For more information, refer to the ``pytest`` documentation on ``skipif``. +""" +from __future__ import annotations + +import locale +from typing import ( + TYPE_CHECKING, + Callable, +) + +import pytest + +from pandas._config import get_option + +if TYPE_CHECKING: + from pandas._typing import F + +from pandas._config.config import _get_option + +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.compat._optional import import_optional_dependency + + +def skip_if_installed(package: str) -> pytest.MarkDecorator: + """ + Skip a test if a package is installed. + + Parameters + ---------- + package : str + The name of the package. + + Returns + ------- + pytest.MarkDecorator + a pytest.mark.skipif to use as either a test decorator or a + parametrization mark. 
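+
+    A usage sketch (illustrative only):
+
+    >>> import pandas.util._test_decorators as td
+    >>> @td.skip_if_installed("matplotlib")
+    ... def test_without_matplotlib():
+    ...     ...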
+ """ + return pytest.mark.skipif( + bool(import_optional_dependency(package, errors="ignore")), + reason=f"Skipping because {package} is installed.", + ) + + +def skip_if_no(package: str, min_version: str | None = None) -> pytest.MarkDecorator: + """ + Generic function to help skip tests when required packages are not + present on the testing system. + + This function returns a pytest mark with a skip condition that will be + evaluated during test collection. An attempt will be made to import the + specified ``package`` and optionally ensure it meets the ``min_version`` + + The mark can be used as either a decorator for a test class or to be + applied to parameters in pytest.mark.parametrize calls or parametrized + fixtures. Use pytest.importorskip if an imported moduled is later needed + or for test functions. + + If the import and version check are unsuccessful, then the test function + (or test case when used in conjunction with parametrization) will be + skipped. + + Parameters + ---------- + package: str + The name of the required package. + min_version: str or None, default None + Optional minimum version of the package. + + Returns + ------- + pytest.MarkDecorator + a pytest.mark.skipif to use as either a test decorator or a + parametrization mark. + """ + msg = f"Could not import '{package}'" + if min_version: + msg += f" satisfying a min_version of {min_version}" + return pytest.mark.skipif( + not bool( + import_optional_dependency( + package, errors="ignore", min_version=min_version + ) + ), + reason=msg, + ) + + +skip_if_32bit = pytest.mark.skipif(not IS64, reason="skipping for 32 bit") +skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows") +skip_if_not_us_locale = pytest.mark.skipif( + locale.getlocale()[0] != "en_US", + reason=f"Set local {locale.getlocale()[0]} is not en_US", +) + + +def parametrize_fixture_doc(*args) -> Callable[[F], F]: + """ + Intended for use as a decorator for parametrized fixture, + this function will wrap the decorated function with a pytest + ``parametrize_fixture_doc`` mark. That mark will format + initial fixture docstring by replacing placeholders {0}, {1} etc + with parameters passed as arguments. + + Parameters + ---------- + args: iterable + Positional arguments for docstring. 
+ + Returns + ------- + function + The decorated function wrapped within a pytest + ``parametrize_fixture_doc`` mark + """ + + def documented_fixture(fixture): + fixture.__doc__ = fixture.__doc__.format(*args) + return fixture + + return documented_fixture + + +def mark_array_manager_not_yet_implemented(request) -> None: + mark = pytest.mark.xfail(reason="Not yet implemented for ArrayManager") + request.applymarker(mark) + + +skip_array_manager_not_yet_implemented = pytest.mark.xfail( + _get_option("mode.data_manager", silent=True) == "array", + reason="Not yet implemented for ArrayManager", +) + +skip_array_manager_invalid_test = pytest.mark.skipif( + _get_option("mode.data_manager", silent=True) == "array", + reason="Test that relies on BlockManager internals or specific behaviour", +) + +skip_copy_on_write_not_yet_implemented = pytest.mark.xfail( + get_option("mode.copy_on_write") is True, + reason="Not yet implemented/adapted for Copy-on-Write mode", +) + +skip_copy_on_write_invalid_test = pytest.mark.skipif( + get_option("mode.copy_on_write") is True, + reason="Test not valid for Copy-on-Write mode", +) diff --git a/venv/lib/python3.10/site-packages/pandas/util/_tester.py b/venv/lib/python3.10/site-packages/pandas/util/_tester.py new file mode 100644 index 0000000000000000000000000000000000000000..7cfddef7ddff87275ebf31eb7ec10e65d26f8668 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/util/_tester.py @@ -0,0 +1,53 @@ +""" +Entrypoint for testing from the top-level namespace. +""" +from __future__ import annotations + +import os +import sys + +from pandas.compat._optional import import_optional_dependency + +PKG = os.path.dirname(os.path.dirname(__file__)) + + +def test(extra_args: list[str] | None = None, run_doctests: bool = False) -> None: + """ + Run the pandas test suite using pytest. + + By default, runs with the marks -m "not slow and not network and not db" + + Parameters + ---------- + extra_args : list[str], default None + Extra marks to run the tests. + run_doctests : bool, default False + Whether to only run the Python and Cython doctests. If you would like to run + both doctests/regular tests, just append "--doctest-modules"/"--doctest-cython" + to extra_args. + + Examples + -------- + >>> pd.test() # doctest: +SKIP + running: pytest... 
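+
+    Only running the doctests (an illustrative sketch; output abbreviated):
+
+    >>> pd.test(run_doctests=True)  # doctest: +SKIP
+    running: pytest --doctest-modules --doctest-cython ...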
+ """ + pytest = import_optional_dependency("pytest") + import_optional_dependency("hypothesis") + cmd = ["-m not slow and not network and not db"] + if extra_args: + if not isinstance(extra_args, list): + extra_args = [extra_args] + cmd = extra_args + if run_doctests: + cmd = [ + "--doctest-modules", + "--doctest-cython", + f"--ignore={os.path.join(PKG, 'tests')}", + ] + cmd += [PKG] + joined = " ".join(cmd) + print(f"running: pytest {joined}") + sys.exit(pytest.main(cmd)) + + +__all__ = ["test"] diff --git a/venv/lib/python3.10/site-packages/pandas/util/_validators.py b/venv/lib/python3.10/site-packages/pandas/util/_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..cb0b4d549f49ea972d50c97986c60be64c021c3c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/util/_validators.py @@ -0,0 +1,456 @@ +""" +Module that contains many useful utilities +for validating data or function arguments +""" +from __future__ import annotations + +from collections.abc import ( + Iterable, + Sequence, +) +from typing import ( + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.common import ( + is_bool, + is_integer, +) + +BoolishT = TypeVar("BoolishT", bool, int) +BoolishNoneT = TypeVar("BoolishNoneT", bool, int, None) + + +def _check_arg_length(fname, args, max_fname_arg_count, compat_args) -> None: + """ + Checks whether 'args' has length of at most 'compat_args'. Raises + a TypeError if that is not the case, similar to in Python when a + function is called with too many arguments. + """ + if max_fname_arg_count < 0: + raise ValueError("'max_fname_arg_count' must be non-negative") + + if len(args) > len(compat_args): + max_arg_count = len(compat_args) + max_fname_arg_count + actual_arg_count = len(args) + max_fname_arg_count + argument = "argument" if max_arg_count == 1 else "arguments" + + raise TypeError( + f"{fname}() takes at most {max_arg_count} {argument} " + f"({actual_arg_count} given)" + ) + + +def _check_for_default_values(fname, arg_val_dict, compat_args) -> None: + """ + Check that the keys in `arg_val_dict` are mapped to their + default values as specified in `compat_args`. + + Note that this function is to be called only when it has been + checked that arg_val_dict.keys() is a subset of compat_args + """ + for key in arg_val_dict: + # try checking equality directly with '=' operator, + # as comparison may have been overridden for the left + # hand object + try: + v1 = arg_val_dict[key] + v2 = compat_args[key] + + # check for None-ness otherwise we could end up + # comparing a numpy array vs None + if (v1 is not None and v2 is None) or (v1 is None and v2 is not None): + match = False + else: + match = v1 == v2 + + if not is_bool(match): + raise ValueError("'match' is not a boolean") + + # could not compare them directly, so try comparison + # using the 'is' operator + except ValueError: + match = arg_val_dict[key] is compat_args[key] + + if not match: + raise ValueError( + f"the '{key}' parameter is not supported in " + f"the pandas implementation of {fname}()" + ) + + +def validate_args(fname, args, max_fname_arg_count, compat_args) -> None: + """ + Checks whether the length of the `*args` argument passed into a function + has at most `len(compat_args)` arguments and whether or not all of these + elements in `args` are set to their default values. 
+ + Parameters + ---------- + fname : str + The name of the function being passed the `*args` parameter + args : tuple + The `*args` parameter passed into a function + max_fname_arg_count : int + The maximum number of arguments that the function `fname` + can accept, excluding those in `args`. Used for displaying + appropriate error messages. Must be non-negative. + compat_args : dict + A dictionary of keys and their associated default values. + In order to accommodate buggy behaviour in some versions of `numpy`, + where a signature displayed keyword arguments but then passed those + arguments **positionally** internally when calling downstream + implementations, a dict ensures that the original + order of the keyword arguments is enforced. + + Raises + ------ + TypeError + If `args` contains more values than there are `compat_args` + ValueError + If `args` contains values that do not correspond to those + of the default values specified in `compat_args` + """ + _check_arg_length(fname, args, max_fname_arg_count, compat_args) + + # We do this so that we can provide a more informative + # error message about the parameters that we are not + # supporting in the pandas implementation of 'fname' + kwargs = dict(zip(compat_args, args)) + _check_for_default_values(fname, kwargs, compat_args) + + +def _check_for_invalid_keys(fname, kwargs, compat_args) -> None: + """ + Checks whether 'kwargs' contains any keys that are not + in 'compat_args' and raises a TypeError if there is one. + """ + # set(dict) --> set of the dictionary's keys + diff = set(kwargs) - set(compat_args) + + if diff: + bad_arg = next(iter(diff)) + raise TypeError(f"{fname}() got an unexpected keyword argument '{bad_arg}'") + + +def validate_kwargs(fname, kwargs, compat_args) -> None: + """ + Checks whether parameters passed to the **kwargs argument in a + function `fname` are valid parameters as specified in `*compat_args` + and whether or not they are set to their default values. + + Parameters + ---------- + fname : str + The name of the function being passed the `**kwargs` parameter + kwargs : dict + The `**kwargs` parameter passed into `fname` + compat_args: dict + A dictionary of keys that `kwargs` is allowed to have and their + associated default values + + Raises + ------ + TypeError if `kwargs` contains keys not in `compat_args` + ValueError if `kwargs` contains keys in `compat_args` that do not + map to the default values specified in `compat_args` + """ + kwds = kwargs.copy() + _check_for_invalid_keys(fname, kwargs, compat_args) + _check_for_default_values(fname, kwds, compat_args) + + +def validate_args_and_kwargs( + fname, args, kwargs, max_fname_arg_count, compat_args +) -> None: + """ + Checks whether parameters passed to the *args and **kwargs argument in a + function `fname` are valid parameters as specified in `*compat_args` + and whether or not they are set to their default values. + + Parameters + ---------- + fname: str + The name of the function being passed the `**kwargs` parameter + args: tuple + The `*args` parameter passed into a function + kwargs: dict + The `**kwargs` parameter passed into `fname` + max_fname_arg_count: int + The minimum number of arguments that the function `fname` + requires, excluding those in `args`. Used for displaying + appropriate error messages. Must be non-negative. + compat_args: dict + A dictionary of keys that `kwargs` is allowed to + have and their associated default values. 
+ + Raises + ------ + TypeError if `args` contains more values than there are + `compat_args` OR `kwargs` contains keys not in `compat_args` + ValueError if `args` contains values not at the default value (`None`) + `kwargs` contains keys in `compat_args` that do not map to the default + value as specified in `compat_args` + + See Also + -------- + validate_args : Purely args validation. + validate_kwargs : Purely kwargs validation. + + """ + # Check that the total number of arguments passed in (i.e. + # args and kwargs) does not exceed the length of compat_args + _check_arg_length( + fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args + ) + + # Check there is no overlap with the positional and keyword + # arguments, similar to what is done in actual Python functions + args_dict = dict(zip(compat_args, args)) + + for key in args_dict: + if key in kwargs: + raise TypeError( + f"{fname}() got multiple values for keyword argument '{key}'" + ) + + kwargs.update(args_dict) + validate_kwargs(fname, kwargs, compat_args) + + +def validate_bool_kwarg( + value: BoolishNoneT, + arg_name: str, + none_allowed: bool = True, + int_allowed: bool = False, +) -> BoolishNoneT: + """ + Ensure that argument passed in arg_name can be interpreted as boolean. + + Parameters + ---------- + value : bool + Value to be validated. + arg_name : str + Name of the argument. To be reflected in the error message. + none_allowed : bool, default True + Whether to consider None to be a valid boolean. + int_allowed : bool, default False + Whether to consider integer value to be a valid boolean. + + Returns + ------- + value + The same value as input. + + Raises + ------ + ValueError + If the value is not a valid boolean. + """ + good_value = is_bool(value) + if none_allowed: + good_value = good_value or (value is None) + + if int_allowed: + good_value = good_value or isinstance(value, int) + + if not good_value: + raise ValueError( + f'For argument "{arg_name}" expected type bool, received ' + f"type {type(value).__name__}." + ) + return value # pyright: ignore[reportGeneralTypeIssues] + + +def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True): + """ + Validate the keyword arguments to 'fillna'. + + This checks that exactly one of 'value' and 'method' is specified. + If 'method' is specified, this validates that it's a valid method. + + Parameters + ---------- + value, method : object + The 'value' and 'method' keyword arguments for 'fillna'. + validate_scalar_dict_value : bool, default True + Whether to validate that 'value' is a scalar or dict. Specifically, + validate that it is not a list or tuple. + + Returns + ------- + value, method : object + """ + from pandas.core.missing import clean_fill_method + + if value is None and method is None: + raise ValueError("Must specify a fill 'value' or 'method'.") + if value is None and method is not None: + method = clean_fill_method(method) + + elif value is not None and method is None: + if validate_scalar_dict_value and isinstance(value, (list, tuple)): + raise TypeError( + '"value" parameter must be a scalar or dict, but ' + f'you passed a "{type(value).__name__}"' + ) + + elif value is not None and method is not None: + raise ValueError("Cannot specify both 'value' and 'method'.") + + return value, method + + +def validate_percentile(q: float | Iterable[float]) -> np.ndarray: + """ + Validate percentiles (used by describe and quantile). 
+ + This function checks if the given float or iterable of floats is a valid percentile + otherwise raises a ValueError. + + Parameters + ---------- + q: float or iterable of floats + A single percentile or an iterable of percentiles. + + Returns + ------- + ndarray + An ndarray of the percentiles if valid. + + Raises + ------ + ValueError if percentiles are not in given interval([0, 1]). + """ + q_arr = np.asarray(q) + # Don't change this to an f-string. The string formatting + # is too expensive for cases where we don't need it. + msg = "percentiles should all be in the interval [0, 1]" + if q_arr.ndim == 0: + if not 0 <= q_arr <= 1: + raise ValueError(msg) + else: + if not all(0 <= qs <= 1 for qs in q_arr): + raise ValueError(msg) + return q_arr + + +@overload +def validate_ascending(ascending: BoolishT) -> BoolishT: + ... + + +@overload +def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]: + ... + + +def validate_ascending( + ascending: bool | int | Sequence[BoolishT], +) -> bool | int | list[BoolishT]: + """Validate ``ascending`` kwargs for ``sort_index`` method.""" + kwargs = {"none_allowed": False, "int_allowed": True} + if not isinstance(ascending, Sequence): + return validate_bool_kwarg(ascending, "ascending", **kwargs) + + return [validate_bool_kwarg(item, "ascending", **kwargs) for item in ascending] + + +def validate_endpoints(closed: str | None) -> tuple[bool, bool]: + """ + Check that the `closed` argument is among [None, "left", "right"] + + Parameters + ---------- + closed : {None, "left", "right"} + + Returns + ------- + left_closed : bool + right_closed : bool + + Raises + ------ + ValueError : if argument is not among valid values + """ + left_closed = False + right_closed = False + + if closed is None: + left_closed = True + right_closed = True + elif closed == "left": + left_closed = True + elif closed == "right": + right_closed = True + else: + raise ValueError("Closed has to be either 'left', 'right' or None") + + return left_closed, right_closed + + +def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]: + """ + Check that the `inclusive` argument is among {"both", "neither", "left", "right"}. + + Parameters + ---------- + inclusive : {"both", "neither", "left", "right"} + + Returns + ------- + left_right_inclusive : tuple[bool, bool] + + Raises + ------ + ValueError : if argument is not among valid values + """ + left_right_inclusive: tuple[bool, bool] | None = None + + if isinstance(inclusive, str): + left_right_inclusive = { + "both": (True, True), + "left": (True, False), + "right": (False, True), + "neither": (False, False), + }.get(inclusive) + + if left_right_inclusive is None: + raise ValueError( + "Inclusive has to be either 'both', 'neither', 'left' or 'right'" + ) + + return left_right_inclusive + + +def validate_insert_loc(loc: int, length: int) -> int: + """ + Check that we have an integer between -length and length, inclusive. + + Standardize negative loc to within [0, length]. + + The exceptions we raise on failure match np.insert. 
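+
+    For example (an illustrative sketch): ``validate_insert_loc(-1, 5)``
+    returns ``4``, while ``validate_insert_loc(6, 5)`` raises ``IndexError``.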
+ """ + if not is_integer(loc): + raise TypeError(f"loc must be an integer between -{length} and {length}") + + if loc < 0: + loc += length + if not 0 <= loc <= length: + raise IndexError(f"loc must be an integer between -{length} and {length}") + return loc # pyright: ignore[reportGeneralTypeIssues] + + +def check_dtype_backend(dtype_backend) -> None: + if dtype_backend is not lib.no_default: + if dtype_backend not in ["numpy_nullable", "pyarrow"]: + raise ValueError( + f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " + f"'pyarrow' are allowed.", + ) diff --git a/venv/lib/python3.10/site-packages/pandas/util/version/__init__.py b/venv/lib/python3.10/site-packages/pandas/util/version/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3a5efbbb09c1e5085bf0564d819be345edd8c827 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/util/version/__init__.py @@ -0,0 +1,579 @@ +# Vendored from https://github.com/pypa/packaging/blob/main/packaging/_structures.py +# and https://github.com/pypa/packaging/blob/main/packaging/_structures.py +# changeset ae891fd74d6dd4c6063bb04f2faeadaac6fc6313 +# 04/30/2021 + +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. Licence at LICENSES/PACKAGING_LICENSE +from __future__ import annotations + +import collections +from collections.abc import Iterator +import itertools +import re +from typing import ( + Callable, + SupportsInt, + Tuple, + Union, +) +import warnings + +__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, type(self)) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, type(self)) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> NegativeInfinityType: + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, type(self)) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, type(self)) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() + + +InfiniteTypes = Union[InfinityType, NegativeInfinityType] +PrePostDevType = Union[InfiniteTypes, tuple[str, int]] +SubLocalType = Union[InfiniteTypes, int, str] +LocalType = Union[ + NegativeInfinityType, + tuple[ + Union[ + SubLocalType, + tuple[SubLocalType, str], + tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], +] +CmpKey = tuple[ + int, tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType +] +LegacyCmpKey = tuple[int, tuple[str, ...]] +VersionComparisonMethod = Callable[ + [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool +] + +_Version = 
collections.namedtuple( + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] +) + + +def parse(version: str) -> LegacyVersion | Version: + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + + Examples + -------- + >>> pd.util.version.Version('1.') + Traceback (most recent call last): + InvalidVersion: Invalid version: '1.' + """ + + +class _BaseVersion: + _key: CmpKey | LegacyCmpKey + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. + def __lt__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: _BaseVersion) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +class LegacyVersion(_BaseVersion): + def __init__(self, version: str) -> None: + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + warnings.warn( + "Creating a LegacyVersion has been deprecated and will be " + "removed in the next major release.", + DeprecationWarning, + ) + + def __str__(self) -> str: + return self._version + + def __repr__(self) -> str: + return f"" + + @property + def public(self) -> str: + return self._version + + @property + def base_version(self) -> str: + return self._version + + @property + def epoch(self) -> int: + return -1 + + @property + def release(self) -> None: + return None + + @property + def pre(self) -> None: + return None + + @property + def post(self) -> None: + return None + + @property + def dev(self) -> None: + return None + + @property + def local(self) -> None: + return None + + @property + def is_prerelease(self) -> bool: + return False + + @property + def is_postrelease(self) -> bool: + return False + + @property + def is_devrelease(self) -> bool: + return False + + +_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) + +_legacy_version_replacement_map = { + "pre": "c", + "preview": "c", + "-": "final-", + "rc": "c", + "dev": "@", +} + + +def _parse_version_parts(s: str) -> Iterator[str]: + for part in _legacy_version_component_re.split(s): + mapped_part = _legacy_version_replacement_map.get(part, part) + + if not mapped_part or mapped_part == ".": + continue + + if mapped_part[:1] in "0123456789": + # pad for numeric comparison + yield mapped_part.zfill(8) + else: + yield "*" + mapped_part + + # ensure that alpha/beta/candidate are before final + yield 
"*final" + + +def _legacy_cmpkey(version: str) -> LegacyCmpKey: + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts: list[str] = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + + return epoch, tuple(parts) + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        return f""
+
+    def __str__(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join([str(x) for x in self.release]))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join([str(x) for x in self.pre]))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> tuple[int, ...]:
+        _release: tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> tuple[str, int] | None:
+        _pre: tuple[str, int] | None = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> int | None:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> int | None:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> str | None:
+        if self._version.local:
+            return ".".join([str(x) for x in self._version.local])
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join([str(x) for x in self.release]))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0
+
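+# Illustrative round-trip (a sketch, not part of the vendored source):
+#
+#     >>> v = Version("1!2.3rc4.post5.dev6+ubuntu.7")
+#     >>> (v.epoch, v.release, v.pre, v.post, v.dev, v.local)
+#     (1, (2, 3), ('rc', 4), 5, 6, 'ubuntu.7')
+#     >>> str(v)
+#     '1!2.3rc4.post5.dev6+ubuntu.7'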
+
+def _parse_letter_version(
+    letter: str, number: str | bytes | SupportsInt
+) -> tuple[str, int] | None:
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not a letter, this is
+        # using the implicit post-release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
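+# For example (illustrative): _parse_letter_version("alpha", None) -> ("a", 0),
+# and _parse_letter_version(None, "2") -> ("post", 2), the implicit "1.0-2" form.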
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> LocalType | None:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
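+# The separators ".", "-" and "_" are treated alike here (illustrative):
+# _parse_local_version("ubuntu-1_x") -> ("ubuntu", 1, "x")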
+
+def _cmpkey(
+    epoch: int,
+    release: tuple[int, ...],
+    pre: tuple[str, int] | None,
+    post: tuple[str, int] | None,
+    dev: tuple[str, int] | None,
+    local: tuple[SubLocalType] | None,
+) -> CmpKey:
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop the now-leading
+    # zeros until we reach something non-zero, then re-reverse the rest back
+    # into the correct order, make it a tuple, and use that for our sorting
+    # key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
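+
+# Net effect of the key (an illustrative sketch): sorting Version objects gives
+# 1.0.dev0 < 1.0a1 < 1.0rc1 < 1.0 < 1.0.post1 < 1.0.post1+anything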
diff --git a/venv/lib/python3.10/site-packages/pandas/util/version/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/util/version/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..16ac379bc7955ba8d81cdc7dd8e311373394ad08
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/util/version/__pycache__/__init__.cpython-310.pyc differ