diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_138_mp_rank_02_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_138_mp_rank_02_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..3d0d113066a03a4f0187a309de107cc6cb6f3b74 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_138_mp_rank_02_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff76c17d334de5c86343d0dffcfc231327ebb290f99caffa78d01c910a492481 +size 41830340 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_186_mp_rank_00_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_186_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..ca1b6d9ec201a67e372a7cde8e2af28d06271512 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_186_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:516bdd279e1eaf12b086ae0250fe787a6d24252660058e79839c4e9d454dafc3 +size 41830148 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_18_mp_rank_02_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_18_mp_rank_02_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..2d4407d8e1294bcb31284f75d5982bdb77dfe98a --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_18_mp_rank_02_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60991694a610facdca259d6d3a14425b34cf10b109817e6b9ed21cdfc3c888aa +size 41830394 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_253_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_253_mp_rank_03_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..57bc71121093a6a391d3c783afeef0bb76b74030 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_253_mp_rank_03_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1136be4dc3a40f81e8ed1a5bce0c10118014b655260171b65f3667e6e92354b9 +size 41830340 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_4_mp_rank_02_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_4_mp_rank_02_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..8cec40e372f4d0528e2ec80dbe62597820ee8f36 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_4_mp_rank_02_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf8df5af07690b36f38efee63b0276fe30f320bb483b8dc2f73871868cf62bf2 +size 41830320 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_96_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_96_mp_rank_03_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..bdd91a85f964ea3466d8c8e468569d88efb3c1ef --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_96_mp_rank_03_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:930c0e049e732d909198b8bbd4950953337f97c72218ed4632cfa51ebb05c094 +size 41830330 diff --git a/ckpts/llama-3b/global_step100/layer_08-model_00-model_states.pt b/ckpts/llama-3b/global_step100/layer_08-model_00-model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..89dfca39674c61002136b2960a7eb7569c522bc4 --- /dev/null +++ b/ckpts/llama-3b/global_step100/layer_08-model_00-model_states.pt @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e448f50f7295acee88d602a64b8a4d486909d1fda052f4645476058f5021de57 +size 116407086 diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a7655a013c6cf3fca754086fdeb29b806220d5e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__init__.py @@ -0,0 +1,9 @@ +""" +core.array_algos is for algorithms that operate on ndarray and ExtensionArray. +These should: + +- Assume that any Index, Series, or DataFrame objects have already been unwrapped. +- Assume that any list arguments have already been cast to ndarray/EA. +- Not depend on Index, Series, or DataFrame, nor import any of these. +- May dispatch to ExtensionArray methods, but should not import from core.arrays. +""" diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba0ab344d891bce69513876e507bd661b1836e2e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/datetimelike_accumulations.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/datetimelike_accumulations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61580daadb704eb7e578ccc4fbf873d9b4c695e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/datetimelike_accumulations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_accumulations.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_accumulations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ab0000cc6626f3afba2c007adeca4f9ebf94ee4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_accumulations.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_reductions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_reductions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f19873535f030b03082a6868c077770e9e2606b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_reductions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/putmask.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/putmask.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbcf54bf1559e24e314c91d48f9ecea7563d1f6a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/putmask.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/quantile.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/quantile.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ba22daffa39a8803149b6b33ab815e1dcc3fde3f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/quantile.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/replace.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/replace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e848cc2c181a224effcc419a190d88770cef098 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/replace.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/take.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/take.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..519a4f09aef96673f8b3488174a7904386bf781b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/take.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/transforms.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/transforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a754981d170016c36f7519ca9ef9be62bfb4493 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/transforms.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py b/venv/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py new file mode 100644 index 0000000000000000000000000000000000000000..825fe60ee6cf88a2186a5f501c8696cecaf2657d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py @@ -0,0 +1,67 @@ +""" +datetimelike_accumulations.py is for accumulations of datetimelike extension arrays +""" + +from __future__ import annotations + +from typing import Callable + +import numpy as np + +from pandas._libs import iNaT + +from pandas.core.dtypes.missing import isna + + +def _cum_func( + func: Callable, + values: np.ndarray, + *, + skipna: bool = True, +): + """ + Accumulations for 1D datetimelike arrays. + + Parameters + ---------- + func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate + values : np.ndarray + Numpy array with the values (can be of any dtype that supports the + operation). Values is modified inplace. + skipna : bool, default True + Whether to skip NA.
+ """ + try: + fill_value = { + np.maximum.accumulate: np.iinfo(np.int64).min, + np.cumsum: 0, + np.minimum.accumulate: np.iinfo(np.int64).max, + }[func] + except KeyError: + raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray") + + mask = isna(values) + y = values.view("i8") + y[mask] = fill_value + + if not skipna: + mask = np.maximum.accumulate(mask) + + result = func(y) + result[mask] = iNaT + + if values.dtype.kind in "mM": + return result.view(values.dtype.base) + return result + + +def cumsum(values: np.ndarray, *, skipna: bool = True) -> np.ndarray: + return _cum_func(np.cumsum, values, skipna=skipna) + + +def cummin(values: np.ndarray, *, skipna: bool = True): + return _cum_func(np.minimum.accumulate, values, skipna=skipna) + + +def cummax(values: np.ndarray, *, skipna: bool = True): + return _cum_func(np.maximum.accumulate, values, skipna=skipna) diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py b/venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py new file mode 100644 index 0000000000000000000000000000000000000000..ad9e96d398a242dc64de2018b749fd2dbca7ed78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py @@ -0,0 +1,90 @@ +""" +masked_accumulations.py is for accumulation algorithms using a mask-based approach +for missing values. +""" + +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Callable, +) + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + + +def _cum_func( + func: Callable, + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, +): + """ + Accumulations for 1D masked array. + + We will modify values in place to replace NAs with the appropriate fill value. + + Parameters + ---------- + func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). + mask : np.ndarray + Boolean numpy array (True values indicate missing values). + skipna : bool, default True + Whether to skip NA. + """ + dtype_info: np.iinfo | np.finfo + if values.dtype.kind == "f": + dtype_info = np.finfo(values.dtype.type) + elif values.dtype.kind in "iu": + dtype_info = np.iinfo(values.dtype.type) + elif values.dtype.kind == "b": + # Max value of bool is 1, but since we are setting into a boolean + # array, 255 is fine as well. Min value has to be 0 when setting + # into the boolean array. 
+ dtype_info = np.iinfo(np.uint8) + else: + raise NotImplementedError( + f"No masked accumulation defined for dtype {values.dtype.type}" + ) + try: + fill_value = { + np.cumprod: 1, + np.maximum.accumulate: dtype_info.min, + np.cumsum: 0, + np.minimum.accumulate: dtype_info.max, + }[func] + except KeyError: + raise NotImplementedError( + f"No accumulation for {func} implemented on BaseMaskedArray" + ) + + values[mask] = fill_value + + if not skipna: + mask = np.maximum.accumulate(mask) + + values = func(values) + return values, mask + + +def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.cumsum, values, mask, skipna=skipna) + + +def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.cumprod, values, mask, skipna=skipna) + + +def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna) + + +def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True): + return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna) diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_reductions.py b/venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..335fa1afc0f4e39956a05b567dcc98f0b98c66e3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_reductions.py @@ -0,0 +1,197 @@ +""" +masked_reductions.py is for reduction algorithms using a mask-based approach +for missing values. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Callable, +) +import warnings + +import numpy as np + +from pandas._libs import missing as libmissing + +from pandas.core.nanops import check_below_min_count + +if TYPE_CHECKING: + from pandas._typing import ( + AxisInt, + npt, + ) + + +def _reductions( + func: Callable, + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = None, + **kwargs, +): + """ + Sum, mean or product for 1D masked array. + + Parameters + ---------- + func : np.sum or np.prod + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). + mask : np.ndarray[bool] + Boolean numpy array (True values indicate missing values). + skipna : bool, default True + Whether to skip NA. + min_count : int, default 0 + The required number of valid values to perform the operation. If fewer than + ``min_count`` non-NA values are present the result will be NA. 
+ axis : int, optional, default None + """ + if not skipna: + if mask.any() or check_below_min_count(values.shape, None, min_count): + return libmissing.NA + else: + return func(values, axis=axis, **kwargs) + else: + if check_below_min_count(values.shape, mask, min_count) and ( + axis is None or values.ndim == 1 + ): + return libmissing.NA + + return func(values, where=~mask, axis=axis, **kwargs) + + +def sum( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = None, +): + return _reductions( + np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis + ) + + +def prod( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + min_count: int = 0, + axis: AxisInt | None = None, +): + return _reductions( + np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis + ) + + +def _minmax( + func: Callable, + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, +): + """ + Reduction for 1D masked array. + + Parameters + ---------- + func : np.min or np.max + values : np.ndarray + Numpy array with the values (can be of any dtype that support the + operation). + mask : np.ndarray[bool] + Boolean numpy array (True values indicate missing values). + skipna : bool, default True + Whether to skip NA. + axis : int, optional, default None + """ + if not skipna: + if mask.any() or not values.size: + # min/max with empty array raise in numpy, pandas returns NA + return libmissing.NA + else: + return func(values, axis=axis) + else: + subset = values[~mask] + if subset.size: + return func(subset, axis=axis) + else: + # min/max with empty array raise in numpy, pandas returns NA + return libmissing.NA + + +def min( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, +): + return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis) + + +def max( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, +): + return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis) + + +def mean( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, +): + if not values.size or mask.all(): + return libmissing.NA + return _reductions(np.mean, values=values, mask=mask, skipna=skipna, axis=axis) + + +def var( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, + ddof: int = 1, +): + if not values.size or mask.all(): + return libmissing.NA + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + return _reductions( + np.var, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof + ) + + +def std( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + *, + skipna: bool = True, + axis: AxisInt | None = None, + ddof: int = 1, +): + if not values.size or mask.all(): + return libmissing.NA + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", RuntimeWarning) + return _reductions( + np.std, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof + ) diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py b/venv/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py new file mode 100644 index 0000000000000000000000000000000000000000..f65d2d20e028e36b35a397d8ac973f184ce1412c --- /dev/null 
+++ b/venv/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py @@ -0,0 +1,149 @@ +""" +EA-compatible analogue to np.putmask +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.cast import infer_dtype_from +from pandas.core.dtypes.common import is_list_like + +from pandas.core.arrays import ExtensionArray + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + npt, + ) + + from pandas import MultiIndex + + +def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None: + """ + ExtensionArray-compatible implementation of np.putmask. The main + difference is we do not handle repeating or truncating like numpy. + + Parameters + ---------- + values: np.ndarray or ExtensionArray + mask : np.ndarray[bool] + We assume extract_bool_array has already been called. + value : Any + """ + + if ( + not isinstance(values, np.ndarray) + or (values.dtype == object and not lib.is_scalar(value)) + # GH#43424: np.putmask raises TypeError if we cannot cast between types with + # rule = "safe", a stricter guarantee we may not have here + or ( + isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype) + ) + ): + # GH#19266 using np.putmask gives unexpected results with listlike value + # along with object dtype + if is_list_like(value) and len(value) == len(values): + values[mask] = value[mask] + else: + values[mask] = value + else: + # GH#37833 np.putmask is more performant than __setitem__ + np.putmask(values, mask, value) + + +def putmask_without_repeat( + values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any +) -> None: + """ + np.putmask will truncate or repeat if `new` is a listlike with + len(new) != len(values). We require an exact match. + + Parameters + ---------- + values : np.ndarray + mask : np.ndarray[bool] + new : Any + """ + if getattr(new, "ndim", 0) >= 1: + new = new.astype(values.dtype, copy=False) + + # TODO: this prob needs some better checking for 2D cases + nlocs = mask.sum() + if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1: + shape = np.shape(new) + # np.shape compat for if setitem_datetimelike_compat + # changed arraylike to list e.g. test_where_dt64_2d + if nlocs == shape[-1]: + # GH#30567 + # If length of ``new`` is less than the length of ``values``, + # `np.putmask` would first repeat the ``new`` array and then + # assign the masked values hence produces incorrect result. + # `np.place` on the other hand uses the ``new`` values at it is + # to place in the masked locations of ``values`` + np.place(values, mask, new) + # i.e. values[mask] = new + elif mask.shape[-1] == shape[-1] or shape[-1] == 1: + np.putmask(values, mask, new) + else: + raise ValueError("cannot assign mismatch length to masked array") + else: + np.putmask(values, mask, new) + + +def validate_putmask( + values: ArrayLike | MultiIndex, mask: np.ndarray +) -> tuple[npt.NDArray[np.bool_], bool]: + """ + Validate mask and check if this putmask operation is a no-op. + """ + mask = extract_bool_array(mask) + if mask.shape != values.shape: + raise ValueError("putmask: mask and data must be the same size") + + noop = not mask.any() + return mask, noop + + +def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]: + """ + If we have a SparseArray or BooleanArray, convert it to ndarray[bool]. + """ + if isinstance(mask, ExtensionArray): + # We could have BooleanArray, Sparse[bool], ... 
+ # Except for BooleanArray, this is equivalent to just + # np.asarray(mask, dtype=bool) + mask = mask.to_numpy(dtype=bool, na_value=False) + + mask = np.asarray(mask, dtype=bool) + return mask + + +def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other): + """ + Parameters + ---------- + values : np.ndarray + num_set : int + For putmask, this is mask.sum() + other : Any + """ + if values.dtype == object: + dtype, _ = infer_dtype_from(other) + + if lib.is_np_dtype(dtype, "mM"): + # https://github.com/numpy/numpy/issues/12550 + # timedelta64 will incorrectly cast to int + if not is_list_like(other): + other = [other] * num_set + else: + other = list(other) + + return other diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py b/venv/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py new file mode 100644 index 0000000000000000000000000000000000000000..5c933294fb944f04dd3e9a64e4731ea4349254f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py @@ -0,0 +1,226 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Scalar, + npt, + ) + + +def quantile_compat( + values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str +) -> ArrayLike: + """ + Compute the quantiles of the given values for each quantile in `qs`. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + qs : np.ndarray[float64] + interpolation : str + + Returns + ------- + np.ndarray or ExtensionArray + """ + if isinstance(values, np.ndarray): + fill_value = na_value_for_dtype(values.dtype, compat=False) + mask = isna(values) + return quantile_with_mask(values, mask, fill_value, qs, interpolation) + else: + return values._quantile(qs, interpolation) + + +def quantile_with_mask( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + fill_value, + qs: npt.NDArray[np.float64], + interpolation: str, +) -> np.ndarray: + """ + Compute the quantiles of the given values for each quantile in `qs`. + + Parameters + ---------- + values : np.ndarray + For ExtensionArray, this is _values_for_factorize()[0] + mask : np.ndarray[bool] + mask = isna(values) + For ExtensionArray, this is computed before calling _value_for_factorize + fill_value : Scalar + The value to interpret fill NA entries with + For ExtensionArray, this is _values_for_factorize()[1] + qs : np.ndarray[float64] + interpolation : str + Type of interpolation + + Returns + ------- + np.ndarray + + Notes + ----- + Assumes values is already 2D. For ExtensionArray this means np.atleast_2d + has been called on _values_for_factorize()[0] + + Quantile is computed along axis=1. 
+ """ + assert values.shape == mask.shape + if values.ndim == 1: + # unsqueeze, operate, re-squeeze + values = np.atleast_2d(values) + mask = np.atleast_2d(mask) + res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation) + return res_values[0] + + assert values.ndim == 2 + + is_empty = values.shape[1] == 0 + + if is_empty: + # create the array of na_values + # 2d len(values) * len(qs) + flat = np.array([fill_value] * len(qs)) + result = np.repeat(flat, len(values)).reshape(len(values), len(qs)) + else: + result = _nanpercentile( + values, + qs * 100.0, + na_value=fill_value, + mask=mask, + interpolation=interpolation, + ) + + result = np.asarray(result) + result = result.T + + return result + + +def _nanpercentile_1d( + values: np.ndarray, + mask: npt.NDArray[np.bool_], + qs: npt.NDArray[np.float64], + na_value: Scalar, + interpolation: str, +) -> Scalar | np.ndarray: + """ + Wrapper for np.percentile that skips missing values, specialized to + 1-dimensional case. + + Parameters + ---------- + values : array over which to find quantiles + mask : ndarray[bool] + locations in values that should be considered missing + qs : np.ndarray[float64] of quantile indices to find + na_value : scalar + value to return for empty or all-null values + interpolation : str + + Returns + ------- + quantiles : scalar or array + """ + # mask is Union[ExtensionArray, ndarray] + values = values[~mask] + + if len(values) == 0: + # Can't pass dtype=values.dtype here bc we might have na_value=np.nan + # with values.dtype=int64 see test_quantile_empty + # equiv: 'np.array([na_value] * len(qs))' but much faster + return np.full(len(qs), na_value) + + return np.percentile( + values, + qs, + # error: No overload variant of "percentile" matches argument + # types "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]" + # , "Dict[str, str]" [call-overload] + method=interpolation, # type: ignore[call-overload] + ) + + +def _nanpercentile( + values: np.ndarray, + qs: npt.NDArray[np.float64], + *, + na_value, + mask: npt.NDArray[np.bool_], + interpolation: str, +): + """ + Wrapper for np.percentile that skips missing values. + + Parameters + ---------- + values : np.ndarray[ndim=2] over which to find quantiles + qs : np.ndarray[float64] of quantile indices to find + na_value : scalar + value to return for empty or all-null values + mask : np.ndarray[bool] + locations in values that should be considered missing + interpolation : str + + Returns + ------- + quantiles : scalar or array + """ + + if values.dtype.kind in "mM": + # need to cast to integer to avoid rounding errors in numpy + result = _nanpercentile( + values.view("i8"), + qs=qs, + na_value=na_value.view("i8"), + mask=mask, + interpolation=interpolation, + ) + + # Note: we have to do `astype` and not view because in general we + # have float result at this point, not i8 + return result.astype(values.dtype) + + if mask.any(): + # Caller is responsible for ensuring mask shape match + assert mask.shape == values.shape + result = [ + _nanpercentile_1d(val, m, qs, na_value, interpolation=interpolation) + for (val, m) in zip(list(values), list(mask)) + ] + if values.dtype.kind == "f": + # preserve itemsize + result = np.asarray(result, dtype=values.dtype).T + else: + result = np.asarray(result).T + if ( + result.dtype != values.dtype + and not mask.all() + and (result == result.astype(values.dtype, copy=False)).all() + ): + # mask.all() will never get cast back to int + # e.g. 
values is integer dtype and result is floating dtype, + # only cast back to integer dtype if result values are all-integer. + result = result.astype(values.dtype, copy=False) + return result + else: + return np.percentile( + values, + qs, + axis=1, + # error: No overload variant of "percentile" matches argument types + # "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]", + # "int", "Dict[str, str]" [call-overload] + method=interpolation, # type: ignore[call-overload] + ) diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/replace.py b/venv/lib/python3.10/site-packages/pandas/core/array_algos/replace.py new file mode 100644 index 0000000000000000000000000000000000000000..5f377276be480ec4d01c8cd1671fa95f9504c7c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/array_algos/replace.py @@ -0,0 +1,152 @@ +""" +Methods used by Block.replace and related methods. +""" +from __future__ import annotations + +import operator +import re +from re import Pattern +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np + +from pandas.core.dtypes.common import ( + is_bool, + is_re, + is_re_compilable, +) +from pandas.core.dtypes.missing import isna + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Scalar, + npt, + ) + + +def should_use_regex(regex: bool, to_replace: Any) -> bool: + """ + Decide whether to treat `to_replace` as a regular expression. + """ + if is_re(to_replace): + regex = True + + regex = regex and is_re_compilable(to_replace) + + # Don't use regex if the pattern is empty. + regex = regex and re.compile(to_replace).pattern != "" + return regex + + +def compare_or_regex_search( + a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_] +) -> ArrayLike: + """ + Compare two array-like inputs of the same shape or two scalar values + + Calls operator.eq or re.search, depending on regex argument. If regex is + True, perform an element-wise regex matching. + + Parameters + ---------- + a : array-like + b : scalar or regex pattern + regex : bool + mask : np.ndarray[bool] + + Returns + ------- + mask : array-like of bool + """ + if isna(b): + return ~mask + + def _check_comparison_types( + result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern + ): + """ + Raises an error if the two arrays (a,b) cannot be compared. + Otherwise, returns the comparison result as expected. + """ + if is_bool(result) and isinstance(a, np.ndarray): + type_names = [type(a).__name__, type(b).__name__] + + type_names[0] = f"ndarray(dtype={a.dtype})" + + raise TypeError( + f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}" + ) + + if not regex or not should_use_regex(regex, b): + # TODO: should use missing.mask_missing?
+ op = lambda x: operator.eq(x, b) + else: + op = np.vectorize( + lambda x: bool(re.search(b, x)) + if isinstance(x, str) and isinstance(b, (str, Pattern)) + else False + ) + + # GH#32621 use mask to avoid comparing to NAs + if isinstance(a, np.ndarray): + a = a[mask] + + result = op(a) + + if isinstance(result, np.ndarray) and mask is not None: + # The shape of the mask can differ to that of the result + # since we may compare only a subset of a's or b's elements + tmp = np.zeros(mask.shape, dtype=np.bool_) + np.place(tmp, mask, result) + result = tmp + + _check_comparison_types(result, a, b) + return result + + +def replace_regex( + values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None +) -> None: + """ + Parameters + ---------- + values : ArrayLike + Object dtype. + rx : re.Pattern + value : Any + mask : np.ndarray[bool], optional + + Notes + ----- + Alters values in-place. + """ + + # deal with replacing values with objects (strings) that match but + # whose replacement is not a string (numeric, nan, object) + if isna(value) or not isinstance(value, str): + + def re_replacer(s): + if is_re(rx) and isinstance(s, str): + return value if rx.search(s) is not None else s + else: + return s + + else: + # value is guaranteed to be a string here, s can be either a string + # or null if it's null it gets returned + def re_replacer(s): + if is_re(rx) and isinstance(s, str): + return rx.sub(value, s) + else: + return s + + f = np.vectorize(re_replacer, otypes=[np.object_]) + + if mask is None: + values[:] = f(values) + else: + values[mask] = f(values[mask]) diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/take.py b/venv/lib/python3.10/site-packages/pandas/core/array_algos/take.py new file mode 100644 index 0000000000000000000000000000000000000000..ac674e31586e72040a8e1313f232a00299b961ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/array_algos/take.py @@ -0,0 +1,594 @@ +from __future__ import annotations + +import functools +from typing import ( + TYPE_CHECKING, + cast, + overload, +) + +import numpy as np + +from pandas._libs import ( + algos as libalgos, + lib, +) + +from pandas.core.dtypes.cast import maybe_promote +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_1d_only_ea_dtype, +) +from pandas.core.dtypes.missing import na_value_for_dtype + +from pandas.core.construction import ensure_wrapped_if_datetimelike + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + AxisInt, + npt, + ) + + from pandas.core.arrays._mixins import NDArrayBackedExtensionArray + from pandas.core.arrays.base import ExtensionArray + + +@overload +def take_nd( + arr: np.ndarray, + indexer, + axis: AxisInt = ..., + fill_value=..., + allow_fill: bool = ..., +) -> np.ndarray: + ... + + +@overload +def take_nd( + arr: ExtensionArray, + indexer, + axis: AxisInt = ..., + fill_value=..., + allow_fill: bool = ..., +) -> ArrayLike: + ... + + +def take_nd( + arr: ArrayLike, + indexer, + axis: AxisInt = 0, + fill_value=lib.no_default, + allow_fill: bool = True, +) -> ArrayLike: + """ + Specialized Cython take which sets NaN values in one pass + + This dispatches to ``take`` defined on ExtensionArrays. + + Note: this function assumes that the indexer is a valid(ated) indexer with + no out of bound indices. + + Parameters + ---------- + arr : np.ndarray or ExtensionArray + Input array. 
+ indexer : ndarray + 1-D array of indices to take, subarrays corresponding to -1 value + indices are filled with fill_value + axis : int, default 0 + Axis to take from + fill_value : any, default np.nan + Fill value to replace -1 values with + allow_fill : bool, default True + If False, indexer is assumed to contain no -1 values so no filling + will be done. This short-circuits computation of a mask. Result is + undefined if allow_fill == False and -1 is present in indexer. + + Returns + ------- + subarray : np.ndarray or ExtensionArray + May be the same type as the input, or cast to an ndarray. + """ + if fill_value is lib.no_default: + fill_value = na_value_for_dtype(arr.dtype, compat=False) + elif lib.is_np_dtype(arr.dtype, "mM"): + dtype, fill_value = maybe_promote(arr.dtype, fill_value) + if arr.dtype != dtype: + # EA.take is strict about returning a new object of the same type + # so for that case cast upfront + arr = arr.astype(dtype) + + if not isinstance(arr, np.ndarray): + # i.e. ExtensionArray, + # includes for EA to catch DatetimeArray, TimedeltaArray + if not is_1d_only_ea_dtype(arr.dtype): + # i.e. DatetimeArray, TimedeltaArray + arr = cast("NDArrayBackedExtensionArray", arr) + return arr.take( + indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis + ) + + return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) + + arr = np.asarray(arr) + return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill) + + +def _take_nd_ndarray( + arr: np.ndarray, + indexer: npt.NDArray[np.intp] | None, + axis: AxisInt, + fill_value, + allow_fill: bool, +) -> np.ndarray: + if indexer is None: + indexer = np.arange(arr.shape[axis], dtype=np.intp) + dtype, fill_value = arr.dtype, arr.dtype.type() + else: + indexer = ensure_platform_int(indexer) + + dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( + arr, indexer, fill_value, allow_fill + ) + + flip_order = False + if arr.ndim == 2 and arr.flags.f_contiguous: + flip_order = True + + if flip_order: + arr = arr.T + axis = arr.ndim - axis - 1 + + # at this point, it's guaranteed that dtype can hold both the arr values + # and the fill_value + out_shape_ = list(arr.shape) + out_shape_[axis] = len(indexer) + out_shape = tuple(out_shape_) + if arr.flags.f_contiguous and axis == arr.ndim - 1: + # minor tweak that can make an order-of-magnitude difference + # for dataframes initialized directly from 2-d ndarrays + # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its + # f-contiguous transpose) + out = np.empty(out_shape, dtype=dtype, order="F") + else: + out = np.empty(out_shape, dtype=dtype) + + func = _get_take_nd_function( + arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info + ) + func(arr, indexer, out, fill_value) + + if flip_order: + out = out.T + return out + + +def take_1d( + arr: ArrayLike, + indexer: npt.NDArray[np.intp], + fill_value=None, + allow_fill: bool = True, + mask: npt.NDArray[np.bool_] | None = None, +) -> ArrayLike: + """ + Specialized version for 1D arrays. Differences compared to `take_nd`: + + - Assumes input array has already been converted to numpy array / EA + - Assumes indexer is already guaranteed to be intp dtype ndarray + - Only works for 1D arrays + + To ensure the lowest possible overhead. + + Note: similarly to `take_nd`, this function assumes that the indexer is + a valid(ated) indexer with no out of bound indices. + + Parameters + ---------- + arr : np.ndarray or ExtensionArray + Input array.
+ indexer : ndarray + 1-D array of indices to take (validated indices, intp dtype). + fill_value : any, default np.nan + Fill value to replace -1 values with + allow_fill : bool, default True + If False, indexer is assumed to contain no -1 values so no filling + will be done. This short-circuits computation of a mask. Result is + undefined if allow_fill == False and -1 is present in indexer. + mask : np.ndarray, optional, default None + If `allow_fill` is True, and the mask (where indexer == -1) is already + known, it can be passed to avoid recomputation. + """ + if not isinstance(arr, np.ndarray): + # ExtensionArray -> dispatch to their method + return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) + + if not allow_fill: + return arr.take(indexer) + + dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value( + arr, indexer, fill_value, True, mask + ) + + # at this point, it's guaranteed that dtype can hold both the arr values + # and the fill_value + out = np.empty(indexer.shape, dtype=dtype) + + func = _get_take_nd_function( + arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info + ) + func(arr, indexer, out, fill_value) + + return out + + +def take_2d_multi( + arr: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + fill_value=np.nan, +) -> np.ndarray: + """ + Specialized Cython take which sets NaN values in one pass. + """ + # This is only called from one place in DataFrame._reindex_multi, + # so we know indexer is well-behaved. + assert indexer is not None + assert indexer[0] is not None + assert indexer[1] is not None + + row_idx, col_idx = indexer + + row_idx = ensure_platform_int(row_idx) + col_idx = ensure_platform_int(col_idx) + indexer = row_idx, col_idx + mask_info = None + + # check for promotion based on types only (do this first because + # it's faster than computing a mask) + dtype, fill_value = maybe_promote(arr.dtype, fill_value) + if dtype != arr.dtype: + # check if promotion is actually required based on indexer + row_mask = row_idx == -1 + col_mask = col_idx == -1 + row_needs = row_mask.any() + col_needs = col_mask.any() + mask_info = (row_mask, col_mask), (row_needs, col_needs) + + if not (row_needs or col_needs): + # if not, then depromote, set fill_value to dummy + # (it won't be used but we don't want the cython code + # to crash when trying to cast it to dtype) + dtype, fill_value = arr.dtype, arr.dtype.type() + + # at this point, it's guaranteed that dtype can hold both the arr values + # and the fill_value + out_shape = len(row_idx), len(col_idx) + out = np.empty(out_shape, dtype=dtype) + + func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None) + if func is None and arr.dtype != out.dtype: + func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None) + if func is not None: + func = _convert_wrapper(func, out.dtype) + + if func is not None: + func(arr, indexer, out=out, fill_value=fill_value) + else: + # test_reindex_multi + _take_2d_multi_object( + arr, indexer, out, fill_value=fill_value, mask_info=mask_info + ) + + return out + + +@functools.lru_cache +def _get_take_nd_function_cached( + ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt +): + """ + Part of _get_take_nd_function below that doesn't need `mask_info` and thus + can be cached (mask_info potentially contains a numpy ndarray which is not + hashable and thus cannot be used as argument for cached function). 
+ """ + tup = (arr_dtype.name, out_dtype.name) + if ndim == 1: + func = _take_1d_dict.get(tup, None) + elif ndim == 2: + if axis == 0: + func = _take_2d_axis0_dict.get(tup, None) + else: + func = _take_2d_axis1_dict.get(tup, None) + if func is not None: + return func + + # We get here with string, uint, float16, and complex dtypes that could + # potentially be handled in algos_take_helper. + # Also a couple with (M8[ns], object) and (m8[ns], object) + tup = (out_dtype.name, out_dtype.name) + if ndim == 1: + func = _take_1d_dict.get(tup, None) + elif ndim == 2: + if axis == 0: + func = _take_2d_axis0_dict.get(tup, None) + else: + func = _take_2d_axis1_dict.get(tup, None) + if func is not None: + func = _convert_wrapper(func, out_dtype) + return func + + return None + + +def _get_take_nd_function( + ndim: int, + arr_dtype: np.dtype, + out_dtype: np.dtype, + axis: AxisInt = 0, + mask_info=None, +): + """ + Get the appropriate "take" implementation for the given dimension, axis + and dtypes. + """ + func = None + if ndim <= 2: + # for this part we don't need `mask_info` -> use the cached algo lookup + func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis) + + if func is None: + + def func(arr, indexer, out, fill_value=np.nan) -> None: + indexer = ensure_platform_int(indexer) + _take_nd_object( + arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info + ) + + return func + + +def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None): + def wrapper( + arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan + ) -> None: + if arr_dtype is not None: + arr = arr.view(arr_dtype) + if out_dtype is not None: + out = out.view(out_dtype) + if fill_wrap is not None: + # FIXME: if we get here with dt64/td64 we need to be sure we have + # matching resos + if fill_value.dtype.kind == "m": + fill_value = fill_value.astype("m8[ns]") + else: + fill_value = fill_value.astype("M8[ns]") + fill_value = fill_wrap(fill_value) + + f(arr, indexer, out, fill_value=fill_value) + + return wrapper + + +def _convert_wrapper(f, conv_dtype): + def wrapper( + arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan + ) -> None: + if conv_dtype == object: + # GH#39755 avoid casting dt64/td64 to integers + arr = ensure_wrapped_if_datetimelike(arr) + arr = arr.astype(conv_dtype) + f(arr, indexer, out, fill_value=fill_value) + + return wrapper + + +_take_1d_dict = { + ("int8", "int8"): libalgos.take_1d_int8_int8, + ("int8", "int32"): libalgos.take_1d_int8_int32, + ("int8", "int64"): libalgos.take_1d_int8_int64, + ("int8", "float64"): libalgos.take_1d_int8_float64, + ("int16", "int16"): libalgos.take_1d_int16_int16, + ("int16", "int32"): libalgos.take_1d_int16_int32, + ("int16", "int64"): libalgos.take_1d_int16_int64, + ("int16", "float64"): libalgos.take_1d_int16_float64, + ("int32", "int32"): libalgos.take_1d_int32_int32, + ("int32", "int64"): libalgos.take_1d_int32_int64, + ("int32", "float64"): libalgos.take_1d_int32_float64, + ("int64", "int64"): libalgos.take_1d_int64_int64, + ("int64", "float64"): libalgos.take_1d_int64_float64, + ("float32", "float32"): libalgos.take_1d_float32_float32, + ("float32", "float64"): libalgos.take_1d_float32_float64, + ("float64", "float64"): libalgos.take_1d_float64_float64, + ("object", "object"): libalgos.take_1d_object_object, + ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8), + ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None), + ("datetime64[ns]", 
"datetime64[ns]"): _view_wrapper( + libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64 + ), +} + +_take_2d_axis0_dict = { + ("int8", "int8"): libalgos.take_2d_axis0_int8_int8, + ("int8", "int32"): libalgos.take_2d_axis0_int8_int32, + ("int8", "int64"): libalgos.take_2d_axis0_int8_int64, + ("int8", "float64"): libalgos.take_2d_axis0_int8_float64, + ("int16", "int16"): libalgos.take_2d_axis0_int16_int16, + ("int16", "int32"): libalgos.take_2d_axis0_int16_int32, + ("int16", "int64"): libalgos.take_2d_axis0_int16_int64, + ("int16", "float64"): libalgos.take_2d_axis0_int16_float64, + ("int32", "int32"): libalgos.take_2d_axis0_int32_int32, + ("int32", "int64"): libalgos.take_2d_axis0_int32_int64, + ("int32", "float64"): libalgos.take_2d_axis0_int32_float64, + ("int64", "int64"): libalgos.take_2d_axis0_int64_int64, + ("int64", "float64"): libalgos.take_2d_axis0_int64_float64, + ("float32", "float32"): libalgos.take_2d_axis0_float32_float32, + ("float32", "float64"): libalgos.take_2d_axis0_float32_float64, + ("float64", "float64"): libalgos.take_2d_axis0_float64_float64, + ("object", "object"): libalgos.take_2d_axis0_object_object, + ("bool", "bool"): _view_wrapper( + libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8 + ), + ("bool", "object"): _view_wrapper( + libalgos.take_2d_axis0_bool_object, np.uint8, None + ), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), +} + +_take_2d_axis1_dict = { + ("int8", "int8"): libalgos.take_2d_axis1_int8_int8, + ("int8", "int32"): libalgos.take_2d_axis1_int8_int32, + ("int8", "int64"): libalgos.take_2d_axis1_int8_int64, + ("int8", "float64"): libalgos.take_2d_axis1_int8_float64, + ("int16", "int16"): libalgos.take_2d_axis1_int16_int16, + ("int16", "int32"): libalgos.take_2d_axis1_int16_int32, + ("int16", "int64"): libalgos.take_2d_axis1_int16_int64, + ("int16", "float64"): libalgos.take_2d_axis1_int16_float64, + ("int32", "int32"): libalgos.take_2d_axis1_int32_int32, + ("int32", "int64"): libalgos.take_2d_axis1_int32_int64, + ("int32", "float64"): libalgos.take_2d_axis1_int32_float64, + ("int64", "int64"): libalgos.take_2d_axis1_int64_int64, + ("int64", "float64"): libalgos.take_2d_axis1_int64_float64, + ("float32", "float32"): libalgos.take_2d_axis1_float32_float32, + ("float32", "float64"): libalgos.take_2d_axis1_float32_float64, + ("float64", "float64"): libalgos.take_2d_axis1_float64_float64, + ("object", "object"): libalgos.take_2d_axis1_object_object, + ("bool", "bool"): _view_wrapper( + libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8 + ), + ("bool", "object"): _view_wrapper( + libalgos.take_2d_axis1_bool_object, np.uint8, None + ), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), +} + +_take_2d_multi_dict = { + ("int8", "int8"): libalgos.take_2d_multi_int8_int8, + ("int8", "int32"): libalgos.take_2d_multi_int8_int32, + ("int8", "int64"): libalgos.take_2d_multi_int8_int64, + ("int8", "float64"): libalgos.take_2d_multi_int8_float64, + ("int16", "int16"): 
libalgos.take_2d_multi_int16_int16, + ("int16", "int32"): libalgos.take_2d_multi_int16_int32, + ("int16", "int64"): libalgos.take_2d_multi_int16_int64, + ("int16", "float64"): libalgos.take_2d_multi_int16_float64, + ("int32", "int32"): libalgos.take_2d_multi_int32_int32, + ("int32", "int64"): libalgos.take_2d_multi_int32_int64, + ("int32", "float64"): libalgos.take_2d_multi_int32_float64, + ("int64", "int64"): libalgos.take_2d_multi_int64_int64, + ("int64", "float64"): libalgos.take_2d_multi_int64_float64, + ("float32", "float32"): libalgos.take_2d_multi_float32_float32, + ("float32", "float64"): libalgos.take_2d_multi_float32_float64, + ("float64", "float64"): libalgos.take_2d_multi_float64_float64, + ("object", "object"): libalgos.take_2d_multi_object_object, + ("bool", "bool"): _view_wrapper( + libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8 + ), + ("bool", "object"): _view_wrapper( + libalgos.take_2d_multi_bool_object, np.uint8, None + ), + ("datetime64[ns]", "datetime64[ns]"): _view_wrapper( + libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), + ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper( + libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64 + ), +} + + +def _take_nd_object( + arr: np.ndarray, + indexer: npt.NDArray[np.intp], + out: np.ndarray, + axis: AxisInt, + fill_value, + mask_info, +) -> None: + if mask_info is not None: + mask, needs_masking = mask_info + else: + mask = indexer == -1 + needs_masking = mask.any() + if arr.dtype != out.dtype: + arr = arr.astype(out.dtype) + if arr.shape[axis] > 0: + arr.take(indexer, axis=axis, out=out) + if needs_masking: + outindexer = [slice(None)] * arr.ndim + outindexer[axis] = mask + out[tuple(outindexer)] = fill_value + + +def _take_2d_multi_object( + arr: np.ndarray, + indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], + out: np.ndarray, + fill_value, + mask_info, +) -> None: + # this is not ideal, performance-wise, but it's better than raising + # an exception (best to optimize in Cython to avoid getting here) + row_idx, col_idx = indexer # both np.intp + if mask_info is not None: + (row_mask, col_mask), (row_needs, col_needs) = mask_info + else: + row_mask = row_idx == -1 + col_mask = col_idx == -1 + row_needs = row_mask.any() + col_needs = col_mask.any() + if fill_value is not None: + if row_needs: + out[row_mask, :] = fill_value + if col_needs: + out[:, col_mask] = fill_value + for i, u_ in enumerate(row_idx): + if u_ != -1: + for j, v in enumerate(col_idx): + if v != -1: + out[i, j] = arr[u_, v] + + +def _take_preprocess_indexer_and_fill_value( + arr: np.ndarray, + indexer: npt.NDArray[np.intp], + fill_value, + allow_fill: bool, + mask: npt.NDArray[np.bool_] | None = None, +): + mask_info: tuple[np.ndarray | None, bool] | None = None + + if not allow_fill: + dtype, fill_value = arr.dtype, arr.dtype.type() + mask_info = None, False + else: + # check for promotion based on types only (do this first because + # it's faster than computing a mask) + dtype, fill_value = maybe_promote(arr.dtype, fill_value) + if dtype != arr.dtype: + # check if promotion is actually required based on indexer + if mask is not None: + needs_masking = True + else: + mask = indexer == -1 + needs_masking = bool(mask.any()) + mask_info = mask, needs_masking + if not needs_masking: + # if not, then depromote, set fill_value to dummy + # (it won't be used but we don't want the cython code + # to crash when trying to cast it to dtype) + dtype, fill_value = arr.dtype, arr.dtype.type() + + 
return dtype, fill_value, mask_info diff --git a/venv/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py b/venv/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..ec67244949e3db92cc811b19cdfcd5d1dd2b4de8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py @@ -0,0 +1,50 @@ +""" +transforms.py is for shape-preserving functions. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import ( + AxisInt, + Scalar, + ) + + +def shift( + values: np.ndarray, periods: int, axis: AxisInt, fill_value: Scalar +) -> np.ndarray: + new_values = values + + if periods == 0 or values.size == 0: + return new_values.copy() + + # make sure array sent to np.roll is c_contiguous + f_ordered = values.flags.f_contiguous + if f_ordered: + new_values = new_values.T + axis = new_values.ndim - axis - 1 + + if new_values.size: + new_values = np.roll( + new_values, + np.intp(periods), + axis=axis, + ) + + axis_indexer = [slice(None)] * values.ndim + if periods > 0: + axis_indexer[axis] = slice(None, periods) + else: + axis_indexer[axis] = slice(periods, None) + new_values[tuple(axis_indexer)] = fill_value + + # restore original order + if f_ordered: + new_values = new_values.T + + return new_values diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/cast.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/cast.py new file mode 100644 index 0000000000000000000000000000000000000000..7dd81ec59bc4974069a9efd353a6f46ecf21c621 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/cast.py @@ -0,0 +1,1973 @@ +""" +Routines for casting. 
+""" + +from __future__ import annotations + +import datetime as dt +import functools +from typing import ( + TYPE_CHECKING, + Any, + Literal, + TypeVar, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import ( + Interval, + Period, + lib, +) +from pandas._libs.missing import ( + NA, + NAType, + checknull, +) +from pandas._libs.tslibs import ( + NaT, + OutOfBoundsDatetime, + OutOfBoundsTimedelta, + Timedelta, + Timestamp, + is_supported_dtype, +) +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 +from pandas.compat.numpy import np_version_gt2 +from pandas.errors import ( + IntCastingNaNError, + LossySetitemError, +) + +from pandas.core.dtypes.common import ( + ensure_int8, + ensure_int16, + ensure_int32, + ensure_int64, + ensure_object, + ensure_str, + is_bool, + is_complex, + is_float, + is_integer, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype as pandas_dtype_func, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + BaseMaskedDtype, + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PandasExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.inference import is_list_like +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + na_value_for_dtype, + notna, +) + +from pandas.io._util import _arrow_dtype_mapping + +if TYPE_CHECKING: + from collections.abc import ( + Sequence, + Sized, + ) + + from pandas._typing import ( + ArrayLike, + Dtype, + DtypeObj, + NumpyIndexT, + Scalar, + npt, + ) + + from pandas import Index + from pandas.core.arrays import ( + Categorical, + DatetimeArray, + ExtensionArray, + IntervalArray, + PeriodArray, + TimedeltaArray, + ) + + +_int8_max = np.iinfo(np.int8).max +_int16_max = np.iinfo(np.int16).max +_int32_max = np.iinfo(np.int32).max + +_dtype_obj = np.dtype(object) + +NumpyArrayT = TypeVar("NumpyArrayT", bound=np.ndarray) + + +def maybe_convert_platform( + values: list | tuple | range | np.ndarray | ExtensionArray, +) -> ArrayLike: + """try to do platform conversion, allow ndarray or list here""" + arr: ArrayLike + + if isinstance(values, (list, tuple, range)): + arr = construct_1d_object_array_from_listlike(values) + else: + # The caller is responsible for ensuring that we have np.ndarray + # or ExtensionArray here. + arr = values + + if arr.dtype == _dtype_obj: + arr = cast(np.ndarray, arr) + arr = lib.maybe_convert_objects(arr) + + return arr + + +def is_nested_object(obj) -> bool: + """ + return a boolean if we have a nested object, e.g. a Series with 1 or + more Series elements + + This may not be necessarily be performant. + + """ + return bool( + isinstance(obj, ABCSeries) + and is_object_dtype(obj.dtype) + and any(isinstance(v, ABCSeries) for v in obj._values) + ) + + +def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar: + """ + Cast scalar to Timestamp or Timedelta if scalar is datetime-like + and dtype is not object. 
+ + Parameters + ---------- + value : scalar + dtype : Dtype, optional + + Returns + ------- + scalar + """ + if dtype == _dtype_obj: + pass + elif isinstance(value, (np.datetime64, dt.datetime)): + value = Timestamp(value) + elif isinstance(value, (np.timedelta64, dt.timedelta)): + value = Timedelta(value) + + return value + + +def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType: + """ + If passed a scalar cast the scalar to a python native type. + + Parameters + ---------- + value : scalar or Series + + Returns + ------- + scalar or Series + """ + if is_float(value): + value = float(value) + elif is_integer(value): + value = int(value) + elif is_bool(value): + value = bool(value) + elif isinstance(value, (np.datetime64, np.timedelta64)): + value = maybe_box_datetimelike(value) + elif value is NA: + value = None + return value + + +def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar: + """ + Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting + into a numpy array. Failing to unbox would risk dropping nanoseconds. + + Notes + ----- + Caller is responsible for checking dtype.kind in "mM" + """ + if is_valid_na_for_dtype(value, dtype): + # GH#36541: can't fill array directly with pd.NaT + # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT) + # ValueError: cannot convert float NaN to integer + value = dtype.type("NaT", "ns") + elif isinstance(value, Timestamp): + if value.tz is None: + value = value.to_datetime64() + elif not isinstance(dtype, DatetimeTZDtype): + raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype") + elif isinstance(value, Timedelta): + value = value.to_timedelta64() + + _disallow_mismatched_datetimelike(value, dtype) + return value + + +def _disallow_mismatched_datetimelike(value, dtype: DtypeObj): + """ + numpy allows np.array(dt64values, dtype="timedelta64[ns]") and + vice-versa, but we do not want to allow this, so we need to + check explicitly + """ + vdtype = getattr(value, "dtype", None) + if vdtype is None: + return + elif (vdtype.kind == "m" and dtype.kind == "M") or ( + vdtype.kind == "M" and dtype.kind == "m" + ): + raise TypeError(f"Cannot cast {repr(value)} to {dtype}") + + +@overload +def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray: + ... + + +@overload +def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike: + ... + + +def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike: + """ + try to cast to the specified dtype (e.g. convert back to bool/int + or could be an astype of float64->float32 + """ + if isinstance(result, ABCSeries): + result = result._values + do_round = False + + if isinstance(dtype, str): + if dtype == "infer": + inferred_type = lib.infer_dtype(result, skipna=False) + if inferred_type == "boolean": + dtype = "bool" + elif inferred_type == "integer": + dtype = "int64" + elif inferred_type == "datetime64": + dtype = "datetime64[ns]" + elif inferred_type in ["timedelta", "timedelta64"]: + dtype = "timedelta64[ns]" + + # try to upcast here + elif inferred_type == "floating": + dtype = "int64" + if issubclass(result.dtype.type, np.number): + do_round = True + + else: + # TODO: complex? what if result is already non-object? 
+ dtype = "object" + + dtype = np.dtype(dtype) + + if not isinstance(dtype, np.dtype): + # enforce our signature annotation + raise TypeError(dtype) # pragma: no cover + + converted = maybe_downcast_numeric(result, dtype, do_round) + if converted is not result: + return converted + + # a datetimelike + # GH12821, iNaT is cast to float + if dtype.kind in "mM" and result.dtype.kind in "if": + result = result.astype(dtype) + + elif dtype.kind == "m" and result.dtype == _dtype_obj: + # test_where_downcast_to_td64 + result = cast(np.ndarray, result) + result = array_to_timedelta64(result) + + elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj: + result = cast(np.ndarray, result) + return np.asarray(maybe_cast_to_datetime(result, dtype=dtype)) + + return result + + +@overload +def maybe_downcast_numeric( + result: np.ndarray, dtype: np.dtype, do_round: bool = False +) -> np.ndarray: + ... + + +@overload +def maybe_downcast_numeric( + result: ExtensionArray, dtype: DtypeObj, do_round: bool = False +) -> ArrayLike: + ... + + +def maybe_downcast_numeric( + result: ArrayLike, dtype: DtypeObj, do_round: bool = False +) -> ArrayLike: + """ + Subset of maybe_downcast_to_dtype restricted to numeric dtypes. + + Parameters + ---------- + result : ndarray or ExtensionArray + dtype : np.dtype or ExtensionDtype + do_round : bool + + Returns + ------- + ndarray or ExtensionArray + """ + if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype): + # e.g. SparseDtype has no itemsize attr + return result + + def trans(x): + if do_round: + return x.round() + return x + + if dtype.kind == result.dtype.kind: + # don't allow upcasts here (except if empty) + if result.dtype.itemsize <= dtype.itemsize and result.size: + return result + + if dtype.kind in "biu": + if not result.size: + # if we don't have any elements, just astype it + return trans(result).astype(dtype) + + if isinstance(result, np.ndarray): + element = result.item(0) + else: + element = result.iloc[0] + if not isinstance(element, (np.integer, np.floating, int, float, bool)): + # a comparable, e.g. a Decimal may slip in here + return result + + if ( + issubclass(result.dtype.type, (np.object_, np.number)) + and notna(result).all() + ): + new_result = trans(result).astype(dtype) + if new_result.dtype.kind == "O" or result.dtype.kind == "O": + # np.allclose may raise TypeError on object-dtype + if (new_result == result).all(): + return new_result + else: + if np.allclose(new_result, result, rtol=0): + return new_result + + elif ( + issubclass(dtype.type, np.floating) + and result.dtype.kind != "b" + and not is_string_dtype(result.dtype) + ): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "overflow encountered in cast", RuntimeWarning + ) + new_result = result.astype(dtype) + + # Adjust tolerances based on floating point size + size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16} + + atol = size_tols.get(new_result.dtype.itemsize, 0.0) + + # Check downcast float values are still equal within 7 digits when + # converting from float64 to float32 + if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol): + return new_result + + elif dtype.kind == result.dtype.kind == "c": + new_result = result.astype(dtype) + + if np.array_equal(new_result, result, equal_nan=True): + # TODO: use tolerance like we do for float? 
+ return new_result + + return result + + +def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT: + """ + If array is a int/uint/float bit size lower than 64 bit, upcast it to 64 bit. + + Parameters + ---------- + arr : ndarray or ExtensionArray + + Returns + ------- + ndarray or ExtensionArray + """ + dtype = arr.dtype + if dtype.kind == "i" and dtype != np.int64: + return arr.astype(np.int64) + elif dtype.kind == "u" and dtype != np.uint64: + return arr.astype(np.uint64) + elif dtype.kind == "f" and dtype != np.float64: + return arr.astype(np.float64) + else: + return arr + + +def maybe_cast_pointwise_result( + result: ArrayLike, + dtype: DtypeObj, + numeric_only: bool = False, + same_dtype: bool = True, +) -> ArrayLike: + """ + Try casting result of a pointwise operation back to the original dtype if + appropriate. + + Parameters + ---------- + result : array-like + Result to cast. + dtype : np.dtype or ExtensionDtype + Input Series from which result was calculated. + numeric_only : bool, default False + Whether to cast only numerics or datetimes as well. + same_dtype : bool, default True + Specify dtype when calling _from_sequence + + Returns + ------- + result : array-like + result maybe casted to the dtype. + """ + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + if same_dtype: + result = _maybe_cast_to_extension_array(cls, result, dtype=dtype) + else: + result = _maybe_cast_to_extension_array(cls, result) + + elif (numeric_only and dtype.kind in "iufcb") or not numeric_only: + result = maybe_downcast_to_dtype(result, dtype) + + return result + + +def _maybe_cast_to_extension_array( + cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None +) -> ArrayLike: + """ + Call to `_from_sequence` that returns the object unchanged on Exception. + + Parameters + ---------- + cls : class, subclass of ExtensionArray + obj : arraylike + Values to pass to cls._from_sequence + dtype : ExtensionDtype, optional + + Returns + ------- + ExtensionArray or obj + """ + result: ArrayLike + + if dtype is not None: + try: + result = cls._from_scalars(obj, dtype=dtype) + except (TypeError, ValueError): + return obj + return result + + try: + result = cls._from_sequence(obj, dtype=dtype) + except Exception: + # We can't predict what downstream EA constructors may raise + result = obj + return result + + +@overload +def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: + ... + + +@overload +def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: + ... + + +def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: + """ + If we have a dtype that cannot hold NA values, find the best match that can. + """ + if isinstance(dtype, ExtensionDtype): + if dtype._can_hold_na: + return dtype + elif isinstance(dtype, IntervalDtype): + # TODO(GH#45349): don't special-case IntervalDtype, allow + # overriding instead of returning object below. + return IntervalDtype(np.float64, closed=dtype.closed) + return _dtype_obj + elif dtype.kind == "b": + return _dtype_obj + elif dtype.kind in "iu": + return np.dtype(np.float64) + return dtype + + +_canonical_nans = { + np.datetime64: np.datetime64("NaT", "ns"), + np.timedelta64: np.timedelta64("NaT", "ns"), + type(np.nan): np.nan, +} + + +def maybe_promote(dtype: np.dtype, fill_value=np.nan): + """ + Find the minimal dtype that can hold both the given dtype and fill_value. 
+ + Parameters + ---------- + dtype : np.dtype + fill_value : scalar, default np.nan + + Returns + ------- + dtype + Upcasted from dtype argument if necessary. + fill_value + Upcasted from fill_value argument if necessary. + + Raises + ------ + ValueError + If fill_value is a non-scalar and dtype is not object. + """ + orig = fill_value + orig_is_nat = False + if checknull(fill_value): + # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740 + # avoid cache misses with NaN/NaT values that are not singletons + if fill_value is not NA: + try: + orig_is_nat = np.isnat(fill_value) + except TypeError: + pass + + fill_value = _canonical_nans.get(type(fill_value), fill_value) + + # for performance, we are using a cached version of the actual implementation + # of the function in _maybe_promote. However, this doesn't always work (in case + # of non-hashable arguments), so we fallback to the actual implementation if needed + try: + # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type + # "Type[Any]"; expected "Hashable" [arg-type] + dtype, fill_value = _maybe_promote_cached( + dtype, fill_value, type(fill_value) # type: ignore[arg-type] + ) + except TypeError: + # if fill_value is not hashable (required for caching) + dtype, fill_value = _maybe_promote(dtype, fill_value) + + if (dtype == _dtype_obj and orig is not None) or ( + orig_is_nat and np.datetime_data(orig)[0] != "ns" + ): + # GH#51592,53497 restore our potentially non-canonical fill_value + fill_value = orig + return dtype, fill_value + + +@functools.lru_cache +def _maybe_promote_cached(dtype, fill_value, fill_value_type): + # The cached version of _maybe_promote below + # This also use fill_value_type as (unused) argument to use this in the + # cache lookup -> to differentiate 1 and True + return _maybe_promote(dtype, fill_value) + + +def _maybe_promote(dtype: np.dtype, fill_value=np.nan): + # The actual implementation of the function, use `maybe_promote` above for + # a cached version. + if not is_scalar(fill_value): + # with object dtype there is nothing to promote, and the user can + # pass pretty much any weird fill_value they like + if dtype != object: + # with object dtype there is nothing to promote, and the user can + # pass pretty much any weird fill_value they like + raise ValueError("fill_value must be a scalar") + dtype = _dtype_obj + return dtype, fill_value + + if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in "iufcmM": + dtype = ensure_dtype_can_hold_na(dtype) + fv = na_value_for_dtype(dtype) + return dtype, fv + + elif isinstance(dtype, CategoricalDtype): + if fill_value in dtype.categories or isna(fill_value): + return dtype, fill_value + else: + return object, ensure_object(fill_value) + + elif isna(fill_value): + dtype = _dtype_obj + if fill_value is None: + # but we retain e.g. pd.NA + fill_value = np.nan + return dtype, fill_value + + # returns tuple of (dtype, fill_value) + if issubclass(dtype.type, np.datetime64): + inferred, fv = infer_dtype_from_scalar(fill_value) + if inferred == dtype: + return dtype, fv + + from pandas.core.arrays import DatetimeArray + + dta = DatetimeArray._from_sequence([], dtype="M8[ns]") + try: + fv = dta._validate_setitem_value(fill_value) + return dta.dtype, fv + except (ValueError, TypeError): + return _dtype_obj, fill_value + + elif issubclass(dtype.type, np.timedelta64): + inferred, fv = infer_dtype_from_scalar(fill_value) + if inferred == dtype: + return dtype, fv + + elif inferred.kind == "m": + # different unit, e.g. 
passed np.timedelta64(24, "h") with dtype=m8[ns] + # see if we can losslessly cast it to our dtype + unit = np.datetime_data(dtype)[0] + try: + td = Timedelta(fill_value).as_unit(unit, round_ok=False) + except OutOfBoundsTimedelta: + return _dtype_obj, fill_value + else: + return dtype, td.asm8 + + return _dtype_obj, fill_value + + elif is_float(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, np.integer): + dtype = np.dtype(np.float64) + + elif dtype.kind == "f": + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # e.g. mst is np.float64 and dtype is np.float32 + dtype = mst + + elif dtype.kind == "c": + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + + elif is_bool(fill_value): + if not issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif is_integer(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, np.integer): + if not np_can_cast_scalar(fill_value, dtype): # type: ignore[arg-type] + # upcast to prevent overflow + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + if dtype.kind == "f": + # Case where we disagree with numpy + dtype = np.dtype(np.object_) + + elif is_complex(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, (np.integer, np.floating)): + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + + elif dtype.kind == "c": + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # e.g. mst is np.complex128 and dtype is np.complex64 + dtype = mst + + else: + dtype = np.dtype(np.object_) + + # in case we have a string that looked like a number + if issubclass(dtype.type, (bytes, str)): + dtype = np.dtype(np.object_) + + fill_value = _ensure_dtype_type(fill_value, dtype) + return dtype, fill_value + + +def _ensure_dtype_type(value, dtype: np.dtype): + """ + Ensure that the given value is an instance of the given dtype. + + e.g. if out dtype is np.complex64_, we should have an instance of that + as opposed to a python complex object. + + Parameters + ---------- + value : object + dtype : np.dtype + + Returns + ------- + object + """ + # Start with exceptions in which we do _not_ cast to numpy types + + if dtype == _dtype_obj: + return value + + # Note: before we get here we have already excluded isna(value) + return dtype.type(value) + + +def infer_dtype_from(val) -> tuple[DtypeObj, Any]: + """ + Interpret the dtype from a scalar or array. + + Parameters + ---------- + val : object + """ + if not is_list_like(val): + return infer_dtype_from_scalar(val) + return infer_dtype_from_array(val) + + +def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]: + """ + Interpret the dtype from a scalar. + + Parameters + ---------- + val : object + """ + dtype: DtypeObj = _dtype_obj + + # a 1-element ndarray + if isinstance(val, np.ndarray): + if val.ndim != 0: + msg = "invalid ndarray passed to infer_dtype_from_scalar" + raise ValueError(msg) + + dtype = val.dtype + val = lib.item_from_zerodim(val) + + elif isinstance(val, str): + # If we create an empty array using a string to infer + # the dtype, NumPy will only allocate one character per entry + # so this is kind of bad. Alternately we could use np.repeat + # instead of np.empty (but then you still don't want things + # coming out as np.str_! 
+ + dtype = _dtype_obj + if using_pyarrow_string_dtype(): + from pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype(storage="pyarrow_numpy") + + elif isinstance(val, (np.datetime64, dt.datetime)): + try: + val = Timestamp(val) + except OutOfBoundsDatetime: + return _dtype_obj, val + + if val is NaT or val.tz is None: + val = val.to_datetime64() + dtype = val.dtype + # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes + else: + dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz) + + elif isinstance(val, (np.timedelta64, dt.timedelta)): + try: + val = Timedelta(val) + except (OutOfBoundsTimedelta, OverflowError): + dtype = _dtype_obj + else: + if val is NaT: + val = np.timedelta64("NaT", "ns") + else: + val = val.asm8 + dtype = val.dtype + + elif is_bool(val): + dtype = np.dtype(np.bool_) + + elif is_integer(val): + if isinstance(val, np.integer): + dtype = np.dtype(type(val)) + else: + dtype = np.dtype(np.int64) + + try: + np.array(val, dtype=dtype) + except OverflowError: + dtype = np.array(val).dtype + + elif is_float(val): + if isinstance(val, np.floating): + dtype = np.dtype(type(val)) + else: + dtype = np.dtype(np.float64) + + elif is_complex(val): + dtype = np.dtype(np.complex128) + + if isinstance(val, Period): + dtype = PeriodDtype(freq=val.freq) + elif isinstance(val, Interval): + subtype = infer_dtype_from_scalar(val.left)[0] + dtype = IntervalDtype(subtype=subtype, closed=val.closed) + + return dtype, val + + +def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]: + """ + Convert datetimelike-keyed dicts to a Timestamp-keyed dict. + + Parameters + ---------- + d: dict-like object + + Returns + ------- + dict + """ + return {maybe_box_datetimelike(key): value for key, value in d.items()} + + +def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]: + """ + Infer the dtype from an array. + + Parameters + ---------- + arr : array + + Returns + ------- + tuple (pandas-compat dtype, array) + + + Examples + -------- + >>> np.asarray([1, '1']) + array(['1', '1'], dtype='<U21') + + >>> infer_dtype_from_array([1, '1']) + (dtype('O'), [1, '1']) + """ + if isinstance(arr, np.ndarray): + return arr.dtype, arr + + if not is_list_like(arr): + raise TypeError("'arr' must be list-like") + + arr_dtype = getattr(arr, "dtype", None) + if isinstance(arr_dtype, ExtensionDtype): + return arr.dtype, arr + + elif isinstance(arr, ABCSeries): + return arr.dtype, np.asarray(arr) + + # don't force numpy coerce with nan's + inferred = lib.infer_dtype(arr, skipna=False) + if inferred in ["string", "bytes", "mixed", "mixed-integer"]: + return (np.dtype(np.object_), arr) + + arr = np.asarray(arr) + return arr.dtype, arr + + +def _maybe_infer_dtype_type(element): + """ + Try to infer an object's dtype, for use in arithmetic ops. + + Uses `element.dtype` if that's available. + Objects implementing the iterator protocol are cast to a NumPy array, + and from there the array's type is used. + + Parameters + ---------- + element : object + Possibly has a `.dtype` attribute, and possibly the iterator + protocol.
+ + Returns + ------- + tipo : type + + Examples + -------- + >>> from collections import namedtuple + >>> Foo = namedtuple("Foo", "dtype") + >>> _maybe_infer_dtype_type(Foo(np.dtype("i8"))) + dtype('int64') + """ + tipo = None + if hasattr(element, "dtype"): + tipo = element.dtype + elif is_list_like(element): + element = np.asarray(element) + tipo = element.dtype + return tipo + + +def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None: + """ + Change string like dtypes to object for + ``DataFrame.select_dtypes()``. + """ + # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + non_string_dtypes = dtype_set - { + np.dtype("S").type, # type: ignore[arg-type] + np.dtype("<U").type, # type: ignore[arg-type] + } + if non_string_dtypes != dtype_set: + raise TypeError("string dtypes are not allowed, use 'object' instead") + + +def coerce_indexer_dtype(indexer, categories) -> np.ndarray: + """coerce the indexer input array to the smallest dtype possible""" + length = len(categories) + if length < _int8_max: + return ensure_int8(indexer) + elif length < _int16_max: + return ensure_int16(indexer) + elif length < _int32_max: + return ensure_int32(indexer) + return ensure_int64(indexer) + + +def convert_dtypes( + input_array: ArrayLike, + convert_string: bool = True, + convert_integer: bool = True, + convert_boolean: bool = True, + convert_floating: bool = True, + infer_objects: bool = False, + dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable", +) -> DtypeObj: + """ + Convert objects to best possible type, and optionally, + to types supporting ``pd.NA``. + + Parameters + ---------- + input_array : ExtensionArray or np.ndarray + convert_string : bool, default True + Whether object dtypes should be converted to ``StringDtype()``. + convert_integer : bool, default True + Whether, if possible, conversion can be done to integer extension types. + convert_boolean : bool, defaults True + Whether object dtypes should be converted to ``BooleanDtypes()``. + convert_floating : bool, defaults True + Whether, if possible, conversion can be done to floating extension types. + If `convert_integer` is also True, preference will be give to integer + dtypes if the floats can be faithfully casted to integers. + infer_objects : bool, defaults False + Whether to also infer objects to float/int if possible. Is only hit if the + object array contains pd.NA. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + ..
versionadded:: 2.0 + + Returns + ------- + np.dtype, or ExtensionDtype + """ + inferred_dtype: str | DtypeObj + + if ( + convert_string or convert_integer or convert_boolean or convert_floating + ) and isinstance(input_array, np.ndarray): + if input_array.dtype == object: + inferred_dtype = lib.infer_dtype(input_array) + else: + inferred_dtype = input_array.dtype + + if is_string_dtype(inferred_dtype): + if not convert_string or inferred_dtype == "bytes": + inferred_dtype = input_array.dtype + else: + inferred_dtype = pandas_dtype_func("string") + + if convert_integer: + target_int_dtype = pandas_dtype_func("Int64") + + if input_array.dtype.kind in "iu": + from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE + + inferred_dtype = NUMPY_INT_TO_DTYPE.get( + input_array.dtype, target_int_dtype + ) + elif input_array.dtype.kind in "fcb": + # TODO: de-dup with maybe_cast_to_integer_array? + arr = input_array[notna(input_array)] + if (arr.astype(int) == arr).all(): + inferred_dtype = target_int_dtype + else: + inferred_dtype = input_array.dtype + elif ( + infer_objects + and input_array.dtype == object + and (isinstance(inferred_dtype, str) and inferred_dtype == "integer") + ): + inferred_dtype = target_int_dtype + + if convert_floating: + if input_array.dtype.kind in "fcb": + # i.e. numeric but not integer + from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE + + inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get( + input_array.dtype, pandas_dtype_func("Float64") + ) + # if we could also convert to integer, check if all floats + # are actually integers + if convert_integer: + # TODO: de-dup with maybe_cast_to_integer_array? + arr = input_array[notna(input_array)] + if (arr.astype(int) == arr).all(): + inferred_dtype = pandas_dtype_func("Int64") + else: + inferred_dtype = inferred_float_dtype + else: + inferred_dtype = inferred_float_dtype + elif ( + infer_objects + and input_array.dtype == object + and ( + isinstance(inferred_dtype, str) + and inferred_dtype == "mixed-integer-float" + ) + ): + inferred_dtype = pandas_dtype_func("Float64") + + if convert_boolean: + if input_array.dtype.kind == "b": + inferred_dtype = pandas_dtype_func("boolean") + elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean": + inferred_dtype = pandas_dtype_func("boolean") + + if isinstance(inferred_dtype, str): + # If we couldn't do anything else, then we retain the dtype + inferred_dtype = input_array.dtype + + else: + inferred_dtype = input_array.dtype + + if dtype_backend == "pyarrow": + from pandas.core.arrays.arrow.array import to_pyarrow_type + from pandas.core.arrays.string_ import StringDtype + + assert not isinstance(inferred_dtype, str) + + if ( + (convert_integer and inferred_dtype.kind in "iu") + or (convert_floating and inferred_dtype.kind in "fc") + or (convert_boolean and inferred_dtype.kind == "b") + or (convert_string and isinstance(inferred_dtype, StringDtype)) + or ( + inferred_dtype.kind not in "iufcb" + and not isinstance(inferred_dtype, StringDtype) + ) + ): + if isinstance(inferred_dtype, PandasExtensionDtype) and not isinstance( + inferred_dtype, DatetimeTZDtype + ): + base_dtype = inferred_dtype.base + elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)): + base_dtype = inferred_dtype.numpy_dtype + elif isinstance(inferred_dtype, StringDtype): + base_dtype = np.dtype(str) + else: + base_dtype = inferred_dtype + if ( + base_dtype.kind == "O" # type: ignore[union-attr] + and input_array.size > 0 + and isna(input_array).all() + ): + import pyarrow as pa 
+ + pa_type = pa.null() + else: + pa_type = to_pyarrow_type(base_dtype) + if pa_type is not None: + inferred_dtype = ArrowDtype(pa_type) + elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype): + # GH 53648 + inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype] + + # error: Incompatible return value type (got "Union[str, Union[dtype[Any], + # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]") + return inferred_dtype # type: ignore[return-value] + + +def maybe_infer_to_datetimelike( + value: npt.NDArray[np.object_], +) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray: + """ + we might have a array (or single object) that is datetime like, + and no dtype is passed don't change the value unless we find a + datetime/timedelta set + + this is pretty strict in that a datetime/timedelta is REQUIRED + in addition to possible nulls/string likes + + Parameters + ---------- + value : np.ndarray[object] + + Returns + ------- + np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray + + """ + if not isinstance(value, np.ndarray) or value.dtype != object: + # Caller is responsible for passing only ndarray[object] + raise TypeError(type(value)) # pragma: no cover + if value.ndim != 1: + # Caller is responsible + raise ValueError(value.ndim) # pragma: no cover + + if not len(value): + return value + + # error: Incompatible return value type (got "Union[ExtensionArray, + # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray, + # TimedeltaArray, PeriodArray, IntervalArray]") + return lib.maybe_convert_objects( # type: ignore[return-value] + value, + # Here we do not convert numeric dtypes, as if we wanted that, + # numpy would have done it for us. + convert_numeric=False, + convert_non_numeric=True, + dtype_if_all_nat=np.dtype("M8[ns]"), + ) + + +def maybe_cast_to_datetime( + value: np.ndarray | list, dtype: np.dtype +) -> ExtensionArray | np.ndarray: + """ + try to cast the array/value to a datetimelike dtype, converting float + nan to iNaT + + Caller is responsible for handling ExtensionDtype cases and non dt64/td64 + cases. + """ + from pandas.core.arrays.datetimes import DatetimeArray + from pandas.core.arrays.timedeltas import TimedeltaArray + + assert dtype.kind in "mM" + if not is_list_like(value): + raise TypeError("value must be listlike") + + # TODO: _from_sequence would raise ValueError in cases where + # _ensure_nanosecond_dtype raises TypeError + _ensure_nanosecond_dtype(dtype) + + if lib.is_np_dtype(dtype, "m"): + res = TimedeltaArray._from_sequence(value, dtype=dtype) + return res + else: + try: + dta = DatetimeArray._from_sequence(value, dtype=dtype) + except ValueError as err: + # We can give a Series-specific exception message. + if "cannot supply both a tz and a timezone-naive dtype" in str(err): + raise ValueError( + "Cannot convert timezone-aware data to " + "timezone-naive dtype. Use " + "pd.Series(values).dt.tz_localize(None) instead." + ) from err + raise + + return dta + + +def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None: + """ + Convert dtypes with granularity less than nanosecond to nanosecond + + >>> _ensure_nanosecond_dtype(np.dtype("M8[us]")) + + >>> _ensure_nanosecond_dtype(np.dtype("M8[D]")) + Traceback (most recent call last): + ... + TypeError: dtype=datetime64[D] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' + + >>> _ensure_nanosecond_dtype(np.dtype("m8[ps]")) + Traceback (most recent call last): + ... 
+ TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' + """ # noqa: E501 + msg = ( + f"The '{dtype.name}' dtype has no unit. " + f"Please pass in '{dtype.name}[ns]' instead." + ) + + # unpack e.g. SparseDtype + dtype = getattr(dtype, "subtype", dtype) + + if not isinstance(dtype, np.dtype): + # i.e. datetime64tz + pass + + elif dtype.kind in "mM": + if not is_supported_dtype(dtype): + # pre-2.0 we would silently swap in nanos for lower-resolutions, + # raise for above-nano resolutions + if dtype.name in ["datetime64", "timedelta64"]: + raise ValueError(msg) + # TODO: ValueError or TypeError? existing test + # test_constructor_generic_timestamp_bad_frequency expects TypeError + raise TypeError( + f"dtype={dtype} is not supported. Supported resolutions are 's', " + "'ms', 'us', and 'ns'" + ) + + +# TODO: other value-dependent functions to standardize here include +# Index._find_common_type_compat +def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj: + """ + Find the type/dtype for the result of an operation between objects. + + This is similar to find_common_type, but looks at the right object instead + of just its dtype. This can be useful in particular when the right + object does not have a `dtype`. + + Parameters + ---------- + left_dtype : np.dtype or ExtensionDtype + right : Any + + Returns + ------- + np.dtype or ExtensionDtype + + See also + -------- + find_common_type + numpy.result_type + """ + new_dtype: DtypeObj + + if ( + isinstance(left_dtype, np.dtype) + and left_dtype.kind in "iuc" + and (lib.is_integer(right) or lib.is_float(right)) + ): + # e.g. with int8 dtype and right=512, we want to end up with + # np.int16, whereas infer_dtype_from(512) gives np.int64, + # which will make us upcast too far. + if lib.is_float(right) and right.is_integer() and left_dtype.kind != "f": + right = int(right) + # After NEP 50, numpy won't inspect Python scalars + # TODO: do we need to recreate numpy's inspection logic for floats too + # (this breaks some tests) + if isinstance(right, int) and not isinstance(right, np.integer): + # This gives an unsigned type by default + # (if our number is positive) + + # If our left dtype is signed, we might not want this since + # this might give us 1 dtype too big + # We should check if the corresponding int dtype (e.g. int64 for uint64) + # can hold the number + right_dtype = np.min_scalar_type(right) + if right == 0: + # Special case 0 + right = left_dtype + elif ( + not np.issubdtype(left_dtype, np.unsignedinteger) + and 0 < right <= np.iinfo(right_dtype).max + ): + # If left dtype isn't unsigned, check if it fits in the signed dtype + right = np.dtype(f"i{right_dtype.itemsize}") + else: + right = right_dtype + + new_dtype = np.result_type(left_dtype, right) + + elif is_valid_na_for_dtype(right, left_dtype): + # e.g. IntervalDtype[int] and None/np.nan + new_dtype = ensure_dtype_can_hold_na(left_dtype) + + else: + dtype, _ = infer_dtype_from(right) + new_dtype = find_common_type([left_dtype, dtype]) + + return new_dtype + + +def common_dtype_categorical_compat( + objs: Sequence[Index | ArrayLike], dtype: DtypeObj +) -> DtypeObj: + """ + Update the result of find_common_type to account for NAs in a Categorical. 
+ + Parameters + ---------- + objs : list[np.ndarray | ExtensionArray | Index] + dtype : np.dtype or ExtensionDtype + + Returns + ------- + np.dtype or ExtensionDtype + """ + # GH#38240 + + # TODO: more generally, could do `not can_hold_na(dtype)` + if lib.is_np_dtype(dtype, "iu"): + for obj in objs: + # We don't want to accientally allow e.g. "categorical" str here + obj_dtype = getattr(obj, "dtype", None) + if isinstance(obj_dtype, CategoricalDtype): + if isinstance(obj, ABCIndex): + # This check may already be cached + hasnas = obj.hasnans + else: + # Categorical + hasnas = cast("Categorical", obj)._hasna + + if hasnas: + # see test_union_int_categorical_with_nan + dtype = np.dtype(np.float64) + break + return dtype + + +def np_find_common_type(*dtypes: np.dtype) -> np.dtype: + """ + np.find_common_type implementation pre-1.25 deprecation using np.result_type + https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065 + + Parameters + ---------- + dtypes : np.dtypes + + Returns + ------- + np.dtype + """ + try: + common_dtype = np.result_type(*dtypes) + if common_dtype.kind in "mMSU": + # NumPy promotion currently (1.25) misbehaves for for times and strings, + # so fall back to object (find_common_dtype did unless there + # was only one dtype) + common_dtype = np.dtype("O") + + except TypeError: + common_dtype = np.dtype("O") + return common_dtype + + +@overload +def find_common_type(types: list[np.dtype]) -> np.dtype: + ... + + +@overload +def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: + ... + + +@overload +def find_common_type(types: list[DtypeObj]) -> DtypeObj: + ... + + +def find_common_type(types): + """ + Find a common data type among the given dtypes. + + Parameters + ---------- + types : list of dtypes + + Returns + ------- + pandas extension or numpy dtype + + See Also + -------- + numpy.find_common_type + + """ + if not types: + raise ValueError("no types given") + + first = types[0] + + # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2) + # => object + if lib.dtypes_all_equal(list(types)): + return first + + # get unique types (dict.fromkeys is used as order-preserving set()) + types = list(dict.fromkeys(types).keys()) + + if any(isinstance(t, ExtensionDtype) for t in types): + for t in types: + if isinstance(t, ExtensionDtype): + res = t._get_common_dtype(types) + if res is not None: + return res + return np.dtype("object") + + # take lowest unit + if all(lib.is_np_dtype(t, "M") for t in types): + return np.dtype(max(types)) + if all(lib.is_np_dtype(t, "m") for t in types): + return np.dtype(max(types)) + + # don't mix bool / int or float or complex + # this is different from numpy, which casts bool with float/int as int + has_bools = any(t.kind == "b" for t in types) + if has_bools: + for t in types: + if t.kind in "iufc": + return np.dtype("object") + + return np_find_common_type(*types) + + +def construct_2d_arraylike_from_scalar( + value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool +) -> np.ndarray: + shape = (length, width) + + if dtype.kind in "mM": + value = _maybe_box_and_unbox_datetimelike(value, dtype) + elif dtype == _dtype_obj: + if isinstance(value, (np.timedelta64, np.datetime64)): + # calling np.array below would cast to pytimedelta/pydatetime + out = np.empty(shape, dtype=object) + out.fill(value) + return out + + # Attempt to coerce to a numpy array + try: + if not copy: + arr = np.asarray(value, dtype=dtype) + else: + arr = np.array(value, dtype=dtype, copy=copy) + except (ValueError, 
TypeError) as err: + raise TypeError( + f"DataFrame constructor called with incompatible data and dtype: {err}" + ) from err + + if arr.ndim != 0: + raise ValueError("DataFrame constructor not properly called!") + + return np.full(shape, arr) + + +def construct_1d_arraylike_from_scalar( + value: Scalar, length: int, dtype: DtypeObj | None +) -> ArrayLike: + """ + create a np.ndarray / pandas type of specified shape and dtype + filled with values + + Parameters + ---------- + value : scalar value + length : int + dtype : pandas_dtype or np.dtype + + Returns + ------- + np.ndarray / pandas type of length, filled with value + + """ + + if dtype is None: + try: + dtype, value = infer_dtype_from_scalar(value) + except OutOfBoundsDatetime: + dtype = _dtype_obj + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + seq = [] if length == 0 else [value] + subarr = cls._from_sequence(seq, dtype=dtype).repeat(length) + + else: + if length and dtype.kind in "iu" and isna(value): + # coerce if we have nan for an integer dtype + dtype = np.dtype("float64") + elif lib.is_np_dtype(dtype, "US"): + # we need to coerce to object dtype to avoid + # to allow numpy to take our string as a scalar value + dtype = np.dtype("object") + if not isna(value): + value = ensure_str(value) + elif dtype.kind in "mM": + value = _maybe_box_and_unbox_datetimelike(value, dtype) + + subarr = np.empty(length, dtype=dtype) + if length: + # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes + subarr.fill(value) + + return subarr + + +def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj): + # Caller is responsible for checking dtype.kind in "mM" + + if isinstance(value, dt.datetime): + # we dont want to box dt64, in particular datetime64("NaT") + value = maybe_box_datetimelike(value, dtype) + + return _maybe_unbox_datetimelike(value, dtype) + + +def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray: + """ + Transform any list-like object in a 1-dimensional numpy array of object + dtype. + + Parameters + ---------- + values : any iterable which has a len() + + Raises + ------ + TypeError + * If `values` does not have a len() + + Returns + ------- + 1-dimensional numpy array of dtype object + """ + # numpy will try to interpret nested lists as further dimensions, hence + # making a 1D array that contains list-likes is a bit tricky: + result = np.empty(len(values), dtype="object") + result[:] = values + return result + + +def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray: + """ + Takes any dtype and returns the casted version, raising for when data is + incompatible with integer/unsigned integer dtypes. + + Parameters + ---------- + arr : np.ndarray or list + The array to cast. + dtype : np.dtype + The integer dtype to cast the array to. + + Returns + ------- + ndarray + Array of integer or unsigned integer dtype. + + Raises + ------ + OverflowError : the dtype is incompatible with the data + ValueError : loss of precision has occurred during casting + + Examples + -------- + If you try to coerce negative values to unsigned integers, it raises: + + >>> pd.Series([-1], dtype="uint64") + Traceback (most recent call last): + ... + OverflowError: Trying to coerce negative values to unsigned integers + + Also, if you try to coerce float values to integers, it raises: + + >>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64")) + Traceback (most recent call last): + ... 
+ ValueError: Trying to coerce float values to integers + """ + assert dtype.kind in "iu" + + try: + if not isinstance(arr, np.ndarray): + with warnings.catch_warnings(): + # We already disallow dtype=uint w/ negative numbers + # (test_constructor_coercion_signed_to_unsigned) so safe to ignore. + if not np_version_gt2: + warnings.filterwarnings( + "ignore", + "NumPy will stop allowing conversion of " + "out-of-bound Python int", + DeprecationWarning, + ) + casted = np.asarray(arr, dtype=dtype) + else: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + casted = arr.astype(dtype, copy=False) + except OverflowError as err: + raise OverflowError( + "The elements provided in the data cannot all be " + f"casted to the dtype {dtype}" + ) from err + + if isinstance(arr, np.ndarray) and arr.dtype == dtype: + # avoid expensive array_equal check + return casted + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + warnings.filterwarnings( + "ignore", "elementwise comparison failed", FutureWarning + ) + if np.array_equal(arr, casted): + return casted + + # We do this casting to allow for proper + # data and dtype checking. + # + # We didn't do this earlier because NumPy + # doesn't handle `uint64` correctly. + arr = np.asarray(arr) + + if np.issubdtype(arr.dtype, str): + # TODO(numpy-2.0 min): This case will raise an OverflowError above + if (casted.astype(str) == arr).all(): + return casted + raise ValueError(f"string values cannot be losslessly cast to {dtype}") + + if dtype.kind == "u" and (arr < 0).any(): + # TODO: can this be hit anymore after numpy 2.0? + raise OverflowError("Trying to coerce negative values to unsigned integers") + + if arr.dtype.kind == "f": + if not np.isfinite(arr).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + raise ValueError("Trying to coerce float values to integers") + if arr.dtype == object: + raise ValueError("Trying to coerce float values to integers") + + if casted.dtype < arr.dtype: + # TODO: Can this path be hit anymore with numpy > 2 + # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows + raise ValueError( + f"Values are too large to be losslessly converted to {dtype}. " + f"To cast anyway, use pd.Series(values).astype({dtype})" + ) + + if arr.dtype.kind in "mM": + # test_constructor_maskedarray_nonfloat + raise TypeError( + f"Constructing a Series or DataFrame from {arr.dtype} values and " + f"dtype={dtype} is not supported. Use values.view({dtype}) instead." + ) + + # No known cases that get here, but raising explicitly to cover our bases. + raise ValueError(f"values cannot be losslessly cast to {dtype}") + + +def can_hold_element(arr: ArrayLike, element: Any) -> bool: + """ + Can we do an inplace setitem with this element in an array with this dtype? 
+ + Parameters + ---------- + arr : np.ndarray or ExtensionArray + element : Any + + Returns + ------- + bool + """ + dtype = arr.dtype + if not isinstance(dtype, np.dtype) or dtype.kind in "mM": + if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)): + # np.dtype here catches datetime64ns and timedelta64ns; we assume + # in this case that we have DatetimeArray/TimedeltaArray + arr = cast( + "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr + ) + try: + arr._validate_setitem_value(element) + return True + except (ValueError, TypeError): + return False + + # This is technically incorrect, but maintains the behavior of + # ExtensionBlock._can_hold_element + return True + + try: + np_can_hold_element(dtype, element) + return True + except (TypeError, LossySetitemError): + return False + + +def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: + """ + Raise if we cannot losslessly set this element into an ndarray with this dtype. + + Specifically about places where we disagree with numpy. i.e. there are + cases where numpy will raise in doing the setitem that we do not check + for here, e.g. setting str "X" into a numeric ndarray. + + Returns + ------- + Any + The element, potentially cast to the dtype. + + Raises + ------ + ValueError : If we cannot losslessly store this element with this dtype. + """ + if dtype == _dtype_obj: + return element + + tipo = _maybe_infer_dtype_type(element) + + if dtype.kind in "iu": + if isinstance(element, range): + if _dtype_can_hold_range(element, dtype): + return element + raise LossySetitemError + + if is_integer(element) or (is_float(element) and element.is_integer()): + # e.g. test_setitem_series_int8 if we have a python int 1 + # tipo may be np.int32, despite the fact that it will fit + # in smaller int dtypes. + info = np.iinfo(dtype) + if info.min <= element <= info.max: + return dtype.type(element) + raise LossySetitemError + + if tipo is not None: + if tipo.kind not in "iu": + if isinstance(element, np.ndarray) and element.dtype.kind == "f": + # If all can be losslessly cast to integers, then we can hold them + with np.errstate(invalid="ignore"): + # We check afterwards if cast was losslessly, so no need to show + # the warning + casted = element.astype(dtype) + comp = casted == element + if comp.all(): + # Return the casted values bc they can be passed to + # np.putmask, whereas the raw values cannot. + # see TestSetitemFloatNDarrayIntoIntegerSeries + return casted + raise LossySetitemError + + elif isinstance(element, ABCExtensionArray) and isinstance( + element.dtype, CategoricalDtype + ): + # GH#52927 setting Categorical value into non-EA frame + # TODO: general-case for EAs? + try: + casted = element.astype(dtype) + except (ValueError, TypeError): + raise LossySetitemError + # Check for cases of either + # a) lossy overflow/rounding or + # b) semantic changes like dt64->int64 + comp = casted == element + if not comp.all(): + raise LossySetitemError + return casted + + # Anything other than integer we cannot hold + raise LossySetitemError + if ( + dtype.kind == "u" + and isinstance(element, np.ndarray) + and element.dtype.kind == "i" + ): + # see test_where_uint64 + casted = element.astype(dtype) + if (casted == element).all(): + # TODO: faster to check (element >=0).all()? potential + # itemsize issues there? + return casted + raise LossySetitemError + if dtype.itemsize < tipo.itemsize: + raise LossySetitemError + if not isinstance(tipo, np.dtype): + # i.e. 
nullable IntegerDtype; we can put this into an ndarray + # losslessly iff it has no NAs + arr = element._values if isinstance(element, ABCSeries) else element + if arr._hasna: + raise LossySetitemError + return element + + return element + + raise LossySetitemError + + if dtype.kind == "f": + if lib.is_integer(element) or lib.is_float(element): + casted = dtype.type(element) + if np.isnan(casted) or casted == element: + return casted + # otherwise e.g. overflow see TestCoercionFloat32 + raise LossySetitemError + + if tipo is not None: + # TODO: itemsize check? + if tipo.kind not in "iuf": + # Anything other than float/integer we cannot hold + raise LossySetitemError + if not isinstance(tipo, np.dtype): + # i.e. nullable IntegerDtype or FloatingDtype; + # we can put this into an ndarray losslessly iff it has no NAs + if element._hasna: + raise LossySetitemError + return element + elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind: + if isinstance(element, np.ndarray): + # e.g. TestDataFrameIndexingWhere::test_where_alignment + casted = element.astype(dtype) + if np.array_equal(casted, element, equal_nan=True): + return casted + raise LossySetitemError + + return element + + raise LossySetitemError + + if dtype.kind == "c": + if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element): + if np.isnan(element): + # see test_where_complex GH#6345 + return dtype.type(element) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + casted = dtype.type(element) + if casted == element: + return casted + # otherwise e.g. overflow see test_32878_complex_itemsize + raise LossySetitemError + + if tipo is not None: + if tipo.kind in "iufc": + return element + raise LossySetitemError + raise LossySetitemError + + if dtype.kind == "b": + if tipo is not None: + if tipo.kind == "b": + if not isinstance(tipo, np.dtype): + # i.e. we have a BooleanArray + if element._hasna: + # i.e. there are pd.NA elements + raise LossySetitemError + return element + raise LossySetitemError + if lib.is_bool(element): + return element + raise LossySetitemError + + if dtype.kind == "S": + # TODO: test tests.frame.methods.test_replace tests get here, + # need more targeted tests. xref phofl has a PR about this + if tipo is not None: + if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize: + return element + raise LossySetitemError + if isinstance(element, bytes) and len(element) <= dtype.itemsize: + return element + raise LossySetitemError + + if dtype.kind == "V": + # i.e. np.void, which cannot hold _anything_ + raise LossySetitemError + + raise NotImplementedError(dtype) + + +def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool: + """ + _maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints), + but in many cases a range can be held by a smaller integer dtype. + Check if this is one of those cases. 
+ """ + if not len(rng): + return True + return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype) + + +def np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool: + """ + np.can_cast pandas-equivalent for pre 2-0 behavior that allowed scalar + inference + + Parameters + ---------- + element : Scalar + dtype : np.dtype + + Returns + ------- + bool + """ + try: + np_can_hold_element(dtype, element) + return True + except (LossySetitemError, NotImplementedError): + return False diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bfe47e4d5b6024cc5c5e6799f4cc681ba786802 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..918793d8152326ec70658ed9c52d99e543e04c34 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a6032a746c9da799efcc90ef2dbbbbd752cb14d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e5e7fbf3db06d6218584a29e63e2cb5e7553bef Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc5890f9de485b745307b6aa15c02a6ad00e4d94 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/internals/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2eb413440ba9c1ef4c016cd874d19c2aba6d791e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/internals/__init__.py @@ -0,0 +1,85 @@ +from pandas.core.internals.api import make_block # 2023-09-18 pyarrow uses this +from pandas.core.internals.array_manager import ( + ArrayManager, + SingleArrayManager, +) +from pandas.core.internals.base import ( + DataManager, + SingleDataManager, +) +from pandas.core.internals.concat import concatenate_managers +from pandas.core.internals.managers import ( + BlockManager, + SingleBlockManager, +) + +__all__ = [ + "Block", # pylint: disable=undefined-all-variable + "DatetimeTZBlock", # pylint: 
disable=undefined-all-variable + "ExtensionBlock", # pylint: disable=undefined-all-variable + "make_block", + "DataManager", + "ArrayManager", + "BlockManager", + "SingleDataManager", + "SingleBlockManager", + "SingleArrayManager", + "concatenate_managers", +] + + +def __getattr__(name: str): + # GH#55139 + import warnings + + if name == "create_block_manager_from_blocks": + # GH#33892 + warnings.warn( + f"{name} is deprecated and will be removed in a future version. " + "Use public APIs instead.", + DeprecationWarning, + # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758 + # on hard-coding stacklevel + stacklevel=2, + ) + from pandas.core.internals.managers import create_block_manager_from_blocks + + return create_block_manager_from_blocks + + if name in [ + "NumericBlock", + "ObjectBlock", + "Block", + "ExtensionBlock", + "DatetimeTZBlock", + ]: + warnings.warn( + f"{name} is deprecated and will be removed in a future version. " + "Use public APIs instead.", + DeprecationWarning, + # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758 + # on hard-coding stacklevel + stacklevel=2, + ) + if name == "NumericBlock": + from pandas.core.internals.blocks import NumericBlock + + return NumericBlock + elif name == "DatetimeTZBlock": + from pandas.core.internals.blocks import DatetimeTZBlock + + return DatetimeTZBlock + elif name == "ExtensionBlock": + from pandas.core.internals.blocks import ExtensionBlock + + return ExtensionBlock + elif name == "Block": + from pandas.core.internals.blocks import Block + + return Block + else: + from pandas.core.internals.blocks import ObjectBlock + + return ObjectBlock + + raise AttributeError(f"module 'pandas.core.internals' has no attribute '{name}'") diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c19041fd67d2697bea7720661ea4dd228b64919 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f2083011e01c301da4c37dcefc93e21e09486d5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..822a7445e4eec761ce13bb7e35ba9c1faa05e34e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6cc09fb4f93e46f80116c613e007328c361b2ca4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f45438465e39f25383aff9ee548e4c2976cff18 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00617ac010636878631ada7fa9f1f38865a46cab Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/api.py b/venv/lib/python3.10/site-packages/pandas/core/internals/api.py new file mode 100644 index 0000000000000000000000000000000000000000..b0b3937ca47ea06c42b4b51964f6a74830a5d9ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/internals/api.py @@ -0,0 +1,156 @@ +""" +This is a pseudo-public API for downstream libraries. We ask that downstream +authors + +1) Try to avoid using internals directly altogether, and failing that, +2) Use only functions exposed here (or in core.internals) + +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.internals import BlockPlacement + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + PeriodDtype, +) + +from pandas.core.arrays import DatetimeArray +from pandas.core.construction import extract_array +from pandas.core.internals.blocks import ( + check_ndim, + ensure_block_shape, + extract_pandas_array, + get_block_type, + maybe_coerce_values, +) + +if TYPE_CHECKING: + from pandas._typing import Dtype + + from pandas.core.internals.blocks import Block + + +def make_block( + values, placement, klass=None, ndim=None, dtype: Dtype | None = None +) -> Block: + """ + This is a pseudo-public analogue to blocks.new_block. 
+ + We ask that downstream libraries use this rather than any fully-internal + APIs, including but not limited to: + + - core.internals.blocks.make_block + - Block.make_block + - Block.make_block_same_class + - Block.__init__ + """ + if dtype is not None: + dtype = pandas_dtype(dtype) + + values, dtype = extract_pandas_array(values, dtype, ndim) + + from pandas.core.internals.blocks import ( + DatetimeTZBlock, + ExtensionBlock, + ) + + if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype): + # GH-44681 changed PeriodArray to be stored in the 2D + # NDArrayBackedExtensionBlock instead of ExtensionBlock + # -> still allow ExtensionBlock to be passed in this case for back compat + klass = None + + if klass is None: + dtype = dtype or values.dtype + klass = get_block_type(dtype) + + elif klass is DatetimeTZBlock and not isinstance(values.dtype, DatetimeTZDtype): + # pyarrow calls get here + values = DatetimeArray._simple_new( + # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has + # incompatible type "Union[ExtensionDtype, dtype[Any], None]"; + # expected "Union[dtype[datetime64], DatetimeTZDtype]" + values, + dtype=dtype, # type: ignore[arg-type] + ) + + if not isinstance(placement, BlockPlacement): + placement = BlockPlacement(placement) + + ndim = maybe_infer_ndim(values, placement, ndim) + if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)): + # GH#41168 ensure we can pass 1D dt64tz values + # More generally, any EA dtype that isn't is_1d_only_ea_dtype + values = extract_array(values, extract_numpy=True) + values = ensure_block_shape(values, ndim) + + check_ndim(values, placement, ndim) + values = maybe_coerce_values(values) + return klass(values, ndim=ndim, placement=placement) + + +def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int: + """ + If `ndim` is not provided, infer it from placement and values. + """ + if ndim is None: + # GH#38134 Block constructor now assumes ndim is not None + if not isinstance(values.dtype, np.dtype): + if len(placement) != 1: + ndim = 1 + else: + ndim = 2 + else: + ndim = values.ndim + return ndim + + +def __getattr__(name: str): + # GH#55139 + import warnings + + if name in [ + "Block", + "ExtensionBlock", + "DatetimeTZBlock", + "create_block_manager_from_blocks", + ]: + # GH#33892 + warnings.warn( + f"{name} is deprecated and will be removed in a future version. 
" + "Use public APIs instead.", + DeprecationWarning, + # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758 + # on hard-coding stacklevel + stacklevel=2, + ) + + if name == "create_block_manager_from_blocks": + from pandas.core.internals.managers import create_block_manager_from_blocks + + return create_block_manager_from_blocks + + elif name == "Block": + from pandas.core.internals.blocks import Block + + return Block + + elif name == "DatetimeTZBlock": + from pandas.core.internals.blocks import DatetimeTZBlock + + return DatetimeTZBlock + + elif name == "ExtensionBlock": + from pandas.core.internals.blocks import ExtensionBlock + + return ExtensionBlock + + raise AttributeError( + f"module 'pandas.core.internals.api' has no attribute '{name}'" + ) diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/array_manager.py b/venv/lib/python3.10/site-packages/pandas/core/internals/array_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..e253f82256a5f6dd8b277b576a33597355d69dcc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/internals/array_manager.py @@ -0,0 +1,1340 @@ +""" +Experimental manager based on storing a collection of 1D arrays +""" +from __future__ import annotations + +import itertools +from typing import ( + TYPE_CHECKING, + Callable, + Literal, +) + +import numpy as np + +from pandas._libs import ( + NaT, + lib, +) + +from pandas.core.dtypes.astype import ( + astype_array, + astype_array_safe, +) +from pandas.core.dtypes.cast import ( + ensure_dtype_can_hold_na, + find_common_type, + infer_dtype_from_scalar, + np_find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_datetime64_ns_dtype, + is_integer, + is_numeric_dtype, + is_object_dtype, + is_timedelta64_ns_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + array_equals, + isna, + na_value_for_dtype, +) + +import pandas.core.algorithms as algos +from pandas.core.array_algos.quantile import quantile_compat +from pandas.core.array_algos.take import take_1d +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + NumpyExtensionArray, + TimedeltaArray, +) +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, + sanitize_array, +) +from pandas.core.indexers import ( + maybe_convert_indices, + validate_indices, +) +from pandas.core.indexes.api import ( + Index, + ensure_index, +) +from pandas.core.indexes.base import get_values_for_csv +from pandas.core.internals.base import ( + DataManager, + SingleDataManager, + ensure_np_dtype, + interleaved_dtype, +) +from pandas.core.internals.blocks import ( + BlockPlacement, + ensure_block_shape, + external_values, + extract_pandas_array, + maybe_coerce_values, + new_block, +) +from pandas.core.internals.managers import make_na_array + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + QuantileInterpolation, + Self, + npt, + ) + + +class BaseArrayManager(DataManager): + """ + Core internal data structure to implement DataFrame and Series. + + Alternative to the BlockManager, storing a list of 1D arrays instead of + Blocks. 
+ + This is *not* a public API class + + Parameters + ---------- + arrays : Sequence of arrays + axes : Sequence of Index + verify_integrity : bool, default True + + """ + + __slots__ = [ + "_axes", # private attribute, because 'axes' has different order, see below + "arrays", + ] + + arrays: list[np.ndarray | ExtensionArray] + _axes: list[Index] + + def __init__( + self, + arrays: list[np.ndarray | ExtensionArray], + axes: list[Index], + verify_integrity: bool = True, + ) -> None: + raise NotImplementedError + + def make_empty(self, axes=None) -> Self: + """Return an empty ArrayManager with the items axis of len 0 (no columns)""" + if axes is None: + axes = [self.axes[1:], Index([])] + + arrays: list[np.ndarray | ExtensionArray] = [] + return type(self)(arrays, axes) + + @property + def items(self) -> Index: + return self._axes[-1] + + @property + # error: Signature of "axes" incompatible with supertype "DataManager" + def axes(self) -> list[Index]: # type: ignore[override] + # mypy doesn't work to override attribute with property + # see https://github.com/python/mypy/issues/4125 + """Axes is BlockManager-compatible order (columns, rows)""" + return [self._axes[1], self._axes[0]] + + @property + def shape_proper(self) -> tuple[int, ...]: + # this returns (n_rows, n_columns) + return tuple(len(ax) for ax in self._axes) + + @staticmethod + def _normalize_axis(axis: AxisInt) -> int: + # switch axis + axis = 1 if axis == 0 else 0 + return axis + + def set_axis(self, axis: AxisInt, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. + self._validate_set_axis(axis, new_labels) + axis = self._normalize_axis(axis) + self._axes[axis] = new_labels + + def get_dtypes(self) -> npt.NDArray[np.object_]: + return np.array([arr.dtype for arr in self.arrays], dtype="object") + + def add_references(self, mgr: BaseArrayManager) -> None: + """ + Only implemented on the BlockManager level + """ + return + + def __getstate__(self): + return self.arrays, self._axes + + def __setstate__(self, state) -> None: + self.arrays = state[0] + self._axes = state[1] + + def __repr__(self) -> str: + output = type(self).__name__ + output += f"\nIndex: {self._axes[0]}" + if self.ndim == 2: + output += f"\nColumns: {self._axes[1]}" + output += f"\n{len(self.arrays)} arrays:" + for arr in self.arrays: + output += f"\n{arr.dtype}" + return output + + def apply( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + """ + Iterate over the arrays, collect and create a new ArrayManager. + + Parameters + ---------- + f : str or callable + Name of the Array method to apply. 
+ align_keys: List[str] or None, default None + **kwargs + Keywords to pass to `f` + + Returns + ------- + ArrayManager + """ + assert "filter" not in kwargs + + align_keys = align_keys or [] + result_arrays: list[ArrayLike] = [] + # fillna: Series/DataFrame is responsible for making sure value is aligned + + aligned_args = {k: kwargs[k] for k in align_keys} + + if f == "apply": + f = kwargs.pop("func") + + for i, arr in enumerate(self.arrays): + if aligned_args: + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + kwargs[k] = obj.iloc[i] + else: + kwargs[k] = obj.iloc[:, i]._values + else: + # otherwise we have an array-like + kwargs[k] = obj[i] + + if callable(f): + applied = f(arr, **kwargs) + else: + applied = getattr(arr, f)(**kwargs) + + result_arrays.append(applied) + + new_axes = self._axes + return type(self)(result_arrays, new_axes) + + def apply_with_block(self, f, align_keys=None, **kwargs) -> Self: + # switch axis to follow BlockManager logic + swap_axis = True + if f == "interpolate": + swap_axis = False + if swap_axis and "axis" in kwargs and self.ndim == 2: + kwargs["axis"] = 1 if kwargs["axis"] == 0 else 0 + + align_keys = align_keys or [] + aligned_args = {k: kwargs[k] for k in align_keys} + + result_arrays = [] + + for i, arr in enumerate(self.arrays): + if aligned_args: + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + if self.ndim == 2: + kwargs[k] = obj.iloc[slice(i, i + 1)]._values + else: + kwargs[k] = obj.iloc[:]._values + else: + kwargs[k] = obj.iloc[:, [i]]._values + else: + # otherwise we have an ndarray + if obj.ndim == 2: + kwargs[k] = obj[[i]] + + if isinstance(arr.dtype, np.dtype) and not isinstance(arr, np.ndarray): + # i.e. TimedeltaArray, DatetimeArray with tz=None. Need to + # convert for the Block constructors. 
+ arr = np.asarray(arr) + + arr = maybe_coerce_values(arr) + if self.ndim == 2: + arr = ensure_block_shape(arr, 2) + bp = BlockPlacement(slice(0, 1, 1)) + block = new_block(arr, placement=bp, ndim=2) + else: + bp = BlockPlacement(slice(0, len(self), 1)) + block = new_block(arr, placement=bp, ndim=1) + + applied = getattr(block, f)(**kwargs) + if isinstance(applied, list): + applied = applied[0] + arr = applied.values + if self.ndim == 2 and arr.ndim == 2: + # 2D for np.ndarray or DatetimeArray/TimedeltaArray + assert len(arr) == 1 + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[int, slice]" + arr = arr[0, :] # type: ignore[call-overload] + result_arrays.append(arr) + + return type(self)(result_arrays, self._axes) + + def setitem(self, indexer, value, warn: bool = True) -> Self: + return self.apply_with_block("setitem", indexer=indexer, value=value) + + def diff(self, n: int) -> Self: + assert self.ndim == 2 # caller ensures + return self.apply(algos.diff, n=n) + + def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self: + if copy is None: + copy = True + + return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors) + + def convert(self, copy: bool | None) -> Self: + if copy is None: + copy = True + + def _convert(arr): + if is_object_dtype(arr.dtype): + # extract NumpyExtensionArray for tests that patch + # NumpyExtensionArray._typ + arr = np.asarray(arr) + result = lib.maybe_convert_objects( + arr, + convert_non_numeric=True, + ) + if result is arr and copy: + return arr.copy() + return result + else: + return arr.copy() if copy else arr + + return self.apply(_convert) + + def get_values_for_csv( + self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None + ) -> Self: + return self.apply( + get_values_for_csv, + na_rep=na_rep, + quoting=quoting, + float_format=float_format, + date_format=date_format, + decimal=decimal, + ) + + @property + def any_extension_types(self) -> bool: + """Whether any of the blocks in this manager are extension blocks""" + return False # any(block.is_extension for block in self.blocks) + + @property + def is_view(self) -> bool: + """return a boolean if we are a single block and are a view""" + # TODO what is this used for? + return False + + @property + def is_single_block(self) -> bool: + return len(self.arrays) == 1 + + def _get_data_subset(self, predicate: Callable) -> Self: + indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)] + arrays = [self.arrays[i] for i in indices] + # TODO copy? + # Note: using Index.take ensures we can retain e.g. DatetimeIndex.freq, + # see test_describe_datetime_columns + taker = np.array(indices, dtype="intp") + new_cols = self._axes[1].take(taker) + new_axes = [self._axes[0], new_cols] + return type(self)(arrays, new_axes, verify_integrity=False) + + def get_bool_data(self, copy: bool = False) -> Self: + """ + Select columns that are bool-dtype and object-dtype columns that are all-bool. + + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + return self._get_data_subset(lambda x: x.dtype == np.dtype(bool)) + + def get_numeric_data(self, copy: bool = False) -> Self: + """ + Select columns that have a numeric dtype. 
+ + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + return self._get_data_subset( + lambda arr: is_numeric_dtype(arr.dtype) + or getattr(arr.dtype, "_is_numeric", False) + ) + + def copy(self, deep: bool | Literal["all"] | None = True) -> Self: + """ + Make deep or shallow copy of ArrayManager + + Parameters + ---------- + deep : bool or string, default True + If False, return shallow copy (do not copy data) + If 'all', copy data and a deep copy of the index + + Returns + ------- + BlockManager + """ + if deep is None: + # ArrayManager does not yet support CoW, so deep=None always means + # deep=True for now + deep = True + + # this preserves the notion of view copying of axes + if deep: + # hit in e.g. tests.io.json.test_pandas + + def copy_func(ax): + return ax.copy(deep=True) if deep == "all" else ax.view() + + new_axes = [copy_func(ax) for ax in self._axes] + else: + new_axes = list(self._axes) + + if deep: + new_arrays = [arr.copy() for arr in self.arrays] + else: + new_arrays = list(self.arrays) + return type(self)(new_arrays, new_axes, verify_integrity=False) + + def reindex_indexer( + self, + new_axis, + indexer, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool | None = True, + # ignored keywords + only_slice: bool = False, + # ArrayManager specific keywords + use_na_proxy: bool = False, + ) -> Self: + axis = self._normalize_axis(axis) + return self._reindex_indexer( + new_axis, + indexer, + axis, + fill_value, + allow_dups, + copy, + use_na_proxy, + ) + + def _reindex_indexer( + self, + new_axis, + indexer: npt.NDArray[np.intp] | None, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool | None = True, + use_na_proxy: bool = False, + ) -> Self: + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray[intp] or None + axis : int + fill_value : object, default None + allow_dups : bool, default False + copy : bool, default True + + + pandas-indexer with -1's only. + """ + if copy is None: + # ArrayManager does not yet support CoW, so deep=None always means + # deep=True for now + copy = True + + if indexer is None: + if new_axis is self._axes[axis] and not copy: + return self + + result = self.copy(deep=copy) + result._axes = list(self._axes) + result._axes[axis] = new_axis + return result + + # some axes don't allow reindexing with dups + if not allow_dups: + self._axes[axis]._validate_can_reindex(indexer) + + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 1: + new_arrays = [] + for i in indexer: + if i == -1: + arr = self._make_na_array( + fill_value=fill_value, use_na_proxy=use_na_proxy + ) + else: + arr = self.arrays[i] + if copy: + arr = arr.copy() + new_arrays.append(arr) + + else: + validate_indices(indexer, len(self._axes[0])) + indexer = ensure_platform_int(indexer) + mask = indexer == -1 + needs_masking = mask.any() + new_arrays = [ + take_1d( + arr, + indexer, + allow_fill=needs_masking, + fill_value=fill_value, + mask=mask, + # if fill_value is not None else blk.fill_value + ) + for arr in self.arrays + ] + + new_axes = list(self._axes) + new_axes[axis] = new_axis + + return type(self)(new_arrays, new_axes, verify_integrity=False) + + def take( + self, + indexer: npt.NDArray[np.intp], + axis: AxisInt = 1, + verify: bool = True, + ) -> Self: + """ + Take items along any axis. 
+ """ + assert isinstance(indexer, np.ndarray), type(indexer) + assert indexer.dtype == np.intp, indexer.dtype + + axis = self._normalize_axis(axis) + + if not indexer.ndim == 1: + raise ValueError("indexer should be 1-dimensional") + + n = self.shape_proper[axis] + indexer = maybe_convert_indices(indexer, n, verify=verify) + + new_labels = self._axes[axis].take(indexer) + return self._reindex_indexer( + new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True + ) + + def _make_na_array(self, fill_value=None, use_na_proxy: bool = False): + if use_na_proxy: + assert fill_value is None + return NullArrayProxy(self.shape_proper[0]) + + if fill_value is None: + fill_value = np.nan + + dtype, fill_value = infer_dtype_from_scalar(fill_value) + array_values = make_na_array(dtype, self.shape_proper[:1], fill_value) + return array_values + + def _equal_values(self, other) -> bool: + """ + Used in .equals defined in base class. Only check the column values + assuming shape and indexes have already been checked. + """ + for left, right in zip(self.arrays, other.arrays): + if not array_equals(left, right): + return False + return True + + # TODO + # to_dict + + +class ArrayManager(BaseArrayManager): + @property + def ndim(self) -> Literal[2]: + return 2 + + def __init__( + self, + arrays: list[np.ndarray | ExtensionArray], + axes: list[Index], + verify_integrity: bool = True, + ) -> None: + # Note: we are storing the axes in "_axes" in the (row, columns) order + # which contrasts the order how it is stored in BlockManager + self._axes = axes + self.arrays = arrays + + if verify_integrity: + self._axes = [ensure_index(ax) for ax in axes] + arrays = [extract_pandas_array(x, None, 1)[0] for x in arrays] + self.arrays = [maybe_coerce_values(arr) for arr in arrays] + self._verify_integrity() + + def _verify_integrity(self) -> None: + n_rows, n_columns = self.shape_proper + if not len(self.arrays) == n_columns: + raise ValueError( + "Number of passed arrays must equal the size of the column Index: " + f"{len(self.arrays)} arrays vs {n_columns} columns." + ) + for arr in self.arrays: + if not len(arr) == n_rows: + raise ValueError( + "Passed arrays should have the same length as the rows Index: " + f"{len(arr)} vs {n_rows} rows" + ) + if not isinstance(arr, (np.ndarray, ExtensionArray)): + raise ValueError( + "Passed arrays should be np.ndarray or ExtensionArray instances, " + f"got {type(arr)} instead" + ) + if not arr.ndim == 1: + raise ValueError( + "Passed arrays should be 1-dimensional, got array with " + f"{arr.ndim} dimensions instead." + ) + + # -------------------------------------------------------------------- + # Indexing + + def fast_xs(self, loc: int) -> SingleArrayManager: + """ + Return the array corresponding to `frame.iloc[loc]`. 
+ + Parameters + ---------- + loc : int + + Returns + ------- + np.ndarray or ExtensionArray + """ + dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) + + values = [arr[loc] for arr in self.arrays] + if isinstance(dtype, ExtensionDtype): + result = dtype.construct_array_type()._from_sequence(values, dtype=dtype) + # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT + elif is_datetime64_ns_dtype(dtype): + result = DatetimeArray._from_sequence(values, dtype=dtype)._ndarray + elif is_timedelta64_ns_dtype(dtype): + result = TimedeltaArray._from_sequence(values, dtype=dtype)._ndarray + else: + result = np.array(values, dtype=dtype) + return SingleArrayManager([result], [self._axes[1]]) + + def get_slice(self, slobj: slice, axis: AxisInt = 0) -> ArrayManager: + axis = self._normalize_axis(axis) + + if axis == 0: + arrays = [arr[slobj] for arr in self.arrays] + elif axis == 1: + arrays = self.arrays[slobj] + + new_axes = list(self._axes) + new_axes[axis] = new_axes[axis]._getitem_slice(slobj) + + return type(self)(arrays, new_axes, verify_integrity=False) + + def iget(self, i: int) -> SingleArrayManager: + """ + Return the data as a SingleArrayManager. + """ + values = self.arrays[i] + return SingleArrayManager([values], [self._axes[0]]) + + def iget_values(self, i: int) -> ArrayLike: + """ + Return the data for column i as the values (ndarray or ExtensionArray). + """ + return self.arrays[i] + + @property + def column_arrays(self) -> list[ArrayLike]: + """ + Used in the JSON C code to access column arrays. + """ + + return [np.asarray(arr) for arr in self.arrays] + + def iset( + self, + loc: int | slice | np.ndarray, + value: ArrayLike, + inplace: bool = False, + refs=None, + ) -> None: + """ + Set new column(s). + + This changes the ArrayManager in-place, but replaces (an) existing + column(s), not changing column values in-place). + + Parameters + ---------- + loc : integer, slice or boolean mask + Positional location (already bounds checked) + value : np.ndarray or ExtensionArray + inplace : bool, default False + Whether overwrite existing array as opposed to replacing it. + """ + # single column -> single integer index + if lib.is_integer(loc): + # TODO can we avoid needing to unpack this here? 
That means converting + # DataFrame into 1D array when loc is an integer + if isinstance(value, np.ndarray) and value.ndim == 2: + assert value.shape[1] == 1 + value = value[:, 0] + + # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item + # but we should avoid that and pass directly the proper array + value = maybe_coerce_values(value) + + assert isinstance(value, (np.ndarray, ExtensionArray)) + assert value.ndim == 1 + assert len(value) == len(self._axes[0]) + self.arrays[loc] = value + return + + # multiple columns -> convert slice or array to integer indices + elif isinstance(loc, slice): + indices: range | np.ndarray = range( + loc.start if loc.start is not None else 0, + loc.stop if loc.stop is not None else self.shape_proper[1], + loc.step if loc.step is not None else 1, + ) + else: + assert isinstance(loc, np.ndarray) + assert loc.dtype == "bool" + indices = np.nonzero(loc)[0] + + assert value.ndim == 2 + assert value.shape[0] == len(self._axes[0]) + + for value_idx, mgr_idx in enumerate(indices): + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[slice, int]" + value_arr = value[:, value_idx] # type: ignore[call-overload] + self.arrays[mgr_idx] = value_arr + return + + def column_setitem( + self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False + ) -> None: + """ + Set values ("setitem") into a single column (not setting the full column). + + This is a method on the ArrayManager level, to avoid creating an + intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) + """ + if not is_integer(loc): + raise TypeError("The column index should be an integer") + arr = self.arrays[loc] + mgr = SingleArrayManager([arr], [self._axes[0]]) + if inplace_only: + mgr.setitem_inplace(idx, value) + else: + new_mgr = mgr.setitem((idx,), value) + # update existing ArrayManager in-place + self.arrays[loc] = new_mgr.arrays[0] + + def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None: + """ + Insert item at selected position. + + Parameters + ---------- + loc : int + item : hashable + value : np.ndarray or ExtensionArray + """ + # insert to the axis; this could possibly raise a TypeError + new_axis = self.items.insert(loc, item) + + value = extract_array(value, extract_numpy=True) + if value.ndim == 2: + if value.shape[0] == 1: + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[int, slice]" + value = value[0, :] # type: ignore[call-overload] + else: + raise ValueError( + f"Expected a 1D array, got an array with shape {value.shape}" + ) + value = maybe_coerce_values(value) + + # TODO self.arrays can be empty + # assert len(value) == len(self.arrays[0]) + + # TODO is this copy needed? + arrays = self.arrays.copy() + arrays.insert(loc, value) + + self.arrays = arrays + self._axes[1] = new_axis + + def idelete(self, indexer) -> ArrayManager: + """ + Delete selected locations in-place (new block and array, same BlockManager) + """ + to_keep = np.ones(self.shape[0], dtype=np.bool_) + to_keep[indexer] = False + + self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]] + self._axes = [self._axes[0], self._axes[1][to_keep]] + return self + + # -------------------------------------------------------------------- + # Array-wise Operation + + def grouped_reduce(self, func: Callable) -> Self: + """ + Apply grouped reduction function columnwise, returning a new ArrayManager. 
+ + Parameters + ---------- + func : grouped reduction function + + Returns + ------- + ArrayManager + """ + result_arrays: list[np.ndarray] = [] + result_indices: list[int] = [] + + for i, arr in enumerate(self.arrays): + # grouped_reduce functions all expect 2D arrays + arr = ensure_block_shape(arr, ndim=2) + res = func(arr) + if res.ndim == 2: + # reverse of ensure_block_shape + assert res.shape[0] == 1 + res = res[0] + + result_arrays.append(res) + result_indices.append(i) + + if len(result_arrays) == 0: + nrows = 0 + else: + nrows = result_arrays[0].shape[0] + index = Index(range(nrows)) + + columns = self.items + + # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; + # expected "List[Union[ndarray, ExtensionArray]]" + return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] + + def reduce(self, func: Callable) -> Self: + """ + Apply reduction function column-wise, returning a single-row ArrayManager. + + Parameters + ---------- + func : reduction function + + Returns + ------- + ArrayManager + """ + result_arrays: list[np.ndarray] = [] + for i, arr in enumerate(self.arrays): + res = func(arr, axis=0) + + # TODO NaT doesn't preserve dtype, so we need to ensure to create + # a timedelta result array if original was timedelta + # what if datetime results in timedelta? (eg std) + dtype = arr.dtype if res is NaT else None + result_arrays.append( + sanitize_array([res], None, dtype=dtype) # type: ignore[arg-type] + ) + + index = Index._simple_new(np.array([None], dtype=object)) # placeholder + columns = self.items + + # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; + # expected "List[Union[ndarray, ExtensionArray]]" + new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] + return new_mgr + + def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager: + """ + Apply array_op blockwise with another (aligned) BlockManager. + """ + # TODO what if `other` is BlockManager ? + left_arrays = self.arrays + right_arrays = other.arrays + result_arrays = [ + array_op(left, right) for left, right in zip(left_arrays, right_arrays) + ] + return type(self)(result_arrays, self._axes) + + def quantile( + self, + *, + qs: Index, # with dtype float64 + transposed: bool = False, + interpolation: QuantileInterpolation = "linear", + ) -> ArrayManager: + arrs = [ensure_block_shape(x, 2) for x in self.arrays] + new_arrs = [ + quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs + ] + for i, arr in enumerate(new_arrs): + if arr.ndim == 2: + assert arr.shape[0] == 1, arr.shape + new_arrs[i] = arr[0] + + axes = [qs, self._axes[1]] + return type(self)(new_arrs, axes) + + # ---------------------------------------------------------------- + + def unstack(self, unstacker, fill_value) -> ArrayManager: + """ + Return a BlockManager with all blocks unstacked. + + Parameters + ---------- + unstacker : reshape._Unstacker + fill_value : Any + fill_value for newly introduced missing values. 
+ + Returns + ------- + unstacked : BlockManager + """ + indexer, _ = unstacker._indexer_and_to_sort + if unstacker.mask.all(): + new_indexer = indexer + allow_fill = False + new_mask2D = None + needs_masking = None + else: + new_indexer = np.full(unstacker.mask.shape, -1) + new_indexer[unstacker.mask] = indexer + allow_fill = True + # calculating the full mask once and passing it to take_1d is faster + # than letting take_1d calculate it in each repeated call + new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) + needs_masking = new_mask2D.any(axis=0) + new_indexer2D = new_indexer.reshape(*unstacker.full_shape) + new_indexer2D = ensure_platform_int(new_indexer2D) + + new_arrays = [] + for arr in self.arrays: + for i in range(unstacker.full_shape[1]): + if allow_fill: + # error: Value of type "Optional[Any]" is not indexable [index] + new_arr = take_1d( + arr, + new_indexer2D[:, i], + allow_fill=needs_masking[i], # type: ignore[index] + fill_value=fill_value, + mask=new_mask2D[:, i], # type: ignore[index] + ) + else: + new_arr = take_1d(arr, new_indexer2D[:, i], allow_fill=False) + new_arrays.append(new_arr) + + new_index = unstacker.new_index + new_columns = unstacker.get_new_columns(self._axes[1]) + new_axes = [new_index, new_columns] + + return type(self)(new_arrays, new_axes, verify_integrity=False) + + def as_array( + self, + dtype=None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + """ + Convert the blockmanager data into an numpy array. + + Parameters + ---------- + dtype : object, default None + Data type of the return array. + copy : bool, default False + If True then guarantee that a copy is returned. A value of + False does not guarantee that the underlying data is not + copied. + na_value : object, default lib.no_default + Value to be used as the missing value sentinel. + + Returns + ------- + arr : ndarray + """ + if len(self.arrays) == 0: + empty_arr = np.empty(self.shape, dtype=float) + return empty_arr.transpose() + + # We want to copy when na_value is provided to avoid + # mutating the original object + copy = copy or na_value is not lib.no_default + + if not dtype: + dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) + + dtype = ensure_np_dtype(dtype) + + result = np.empty(self.shape_proper, dtype=dtype) + + for i, arr in enumerate(self.arrays): + arr = arr.astype(dtype, copy=copy) + result[:, i] = arr + + if na_value is not lib.no_default: + result[isna(result)] = na_value + + return result + + @classmethod + def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self: + """ + Concatenate uniformly-indexed ArrayManagers horizontally. + """ + # concatting along the columns -> combine reindexed arrays in a single manager + arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs])) + new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False) + return new_mgr + + @classmethod + def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self: + """ + Concatenate uniformly-indexed ArrayManagers vertically. 
+ """ + # concatting along the rows -> concat the reindexed arrays + # TODO(ArrayManager) doesn't yet preserve the correct dtype + arrays = [ + concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))]) + for j in range(len(mgrs[0].arrays)) + ] + new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False) + return new_mgr + + +class SingleArrayManager(BaseArrayManager, SingleDataManager): + __slots__ = [ + "_axes", # private attribute, because 'axes' has different order, see below + "arrays", + ] + + arrays: list[np.ndarray | ExtensionArray] + _axes: list[Index] + + @property + def ndim(self) -> Literal[1]: + return 1 + + def __init__( + self, + arrays: list[np.ndarray | ExtensionArray], + axes: list[Index], + verify_integrity: bool = True, + ) -> None: + self._axes = axes + self.arrays = arrays + + if verify_integrity: + assert len(axes) == 1 + assert len(arrays) == 1 + self._axes = [ensure_index(ax) for ax in self._axes] + arr = arrays[0] + arr = maybe_coerce_values(arr) + arr = extract_pandas_array(arr, None, 1)[0] + self.arrays = [arr] + self._verify_integrity() + + def _verify_integrity(self) -> None: + (n_rows,) = self.shape + assert len(self.arrays) == 1 + arr = self.arrays[0] + assert len(arr) == n_rows + if not arr.ndim == 1: + raise ValueError( + "Passed array should be 1-dimensional, got array with " + f"{arr.ndim} dimensions instead." + ) + + @staticmethod + def _normalize_axis(axis): + return axis + + def make_empty(self, axes=None) -> Self: + """Return an empty ArrayManager with index/array of length 0""" + if axes is None: + axes = [Index([], dtype=object)] + array: np.ndarray = np.array([], dtype=self.dtype) + return type(self)([array], axes) + + @classmethod + def from_array(cls, array, index) -> SingleArrayManager: + return cls([array], [index]) + + # error: Cannot override writeable attribute with read-only property + @property + def axes(self) -> list[Index]: # type: ignore[override] + return self._axes + + @property + def index(self) -> Index: + return self._axes[0] + + @property + def dtype(self): + return self.array.dtype + + def external_values(self): + """The array that Series.values returns""" + return external_values(self.array) + + def internal_values(self): + """The array that Series._values returns""" + return self.array + + def array_values(self): + """The array that Series.array returns""" + arr = self.array + if isinstance(arr, np.ndarray): + arr = NumpyExtensionArray(arr) + return arr + + @property + def _can_hold_na(self) -> bool: + if isinstance(self.array, np.ndarray): + return self.array.dtype.kind not in "iub" + else: + # ExtensionArray + return self.array._can_hold_na + + @property + def is_single_block(self) -> bool: + return True + + def fast_xs(self, loc: int) -> SingleArrayManager: + raise NotImplementedError("Use series._values[loc] instead") + + def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleArrayManager: + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + new_array = self.array[slobj] + new_index = self.index._getitem_slice(slobj) + return type(self)([new_array], [new_index], verify_integrity=False) + + def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> SingleArrayManager: + new_array = self.array[indexer] + new_index = self.index[indexer] + return type(self)([new_array], [new_index]) + + # error: Signature of "apply" incompatible with supertype "BaseArrayManager" + def apply(self, func, **kwargs) -> Self: # type: ignore[override] + if callable(func): + new_array = 
func(self.array, **kwargs) + else: + new_array = getattr(self.array, func)(**kwargs) + return type(self)([new_array], self._axes) + + def setitem(self, indexer, value, warn: bool = True) -> SingleArrayManager: + """ + Set values with indexer. + + For SingleArrayManager, this backs s[indexer] = value + + See `setitem_inplace` for a version that works inplace and doesn't + return a new Manager. + """ + if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim: + raise ValueError(f"Cannot set values with ndim > {self.ndim}") + return self.apply_with_block("setitem", indexer=indexer, value=value) + + def idelete(self, indexer) -> SingleArrayManager: + """ + Delete selected locations in-place (new array, same ArrayManager) + """ + to_keep = np.ones(self.shape[0], dtype=np.bool_) + to_keep[indexer] = False + + self.arrays = [self.arrays[0][to_keep]] + self._axes = [self._axes[0][to_keep]] + return self + + def _get_data_subset(self, predicate: Callable) -> SingleArrayManager: + # used in get_numeric_data / get_bool_data + if predicate(self.array): + return type(self)(self.arrays, self._axes, verify_integrity=False) + else: + return self.make_empty() + + def set_values(self, values: ArrayLike) -> None: + """ + Set (replace) the values of the SingleArrayManager in place. + + Use at your own risk! This does not check if the passed values are + valid for the current SingleArrayManager (length, dtype, etc). + """ + self.arrays[0] = values + + def to_2d_mgr(self, columns: Index) -> ArrayManager: + """ + Manager analogue of Series.to_frame + """ + arrays = [self.arrays[0]] + axes = [self.axes[0], columns] + + return ArrayManager(arrays, axes, verify_integrity=False) + + +class NullArrayProxy: + """ + Proxy object for an all-NA array. + + Only stores the length of the array, and not the dtype. The dtype + will only be known when actually concatenating (after determining the + common dtype, for which this proxy is ignored). + Using this object avoids that the internals/concat.py needs to determine + the proper dtype and array type. + """ + + ndim = 1 + + def __init__(self, n: int) -> None: + self.n = n + + @property + def shape(self) -> tuple[int]: + return (self.n,) + + def to_array(self, dtype: DtypeObj) -> ArrayLike: + """ + Helper function to create the actual all-NA array from the NullArrayProxy + object. + + Parameters + ---------- + arr : NullArrayProxy + dtype : the dtype for the resulting array + + Returns + ------- + np.ndarray or ExtensionArray + """ + if isinstance(dtype, ExtensionDtype): + empty = dtype.construct_array_type()._from_sequence([], dtype=dtype) + indexer = -np.ones(self.n, dtype=np.intp) + return empty.take(indexer, allow_fill=True) + else: + # when introducing missing values, int becomes float, bool becomes object + dtype = ensure_dtype_can_hold_na(dtype) + fill_value = na_value_for_dtype(dtype) + arr = np.empty(self.n, dtype=dtype) + arr.fill(fill_value) + return ensure_wrapped_if_datetimelike(arr) + + +def concat_arrays(to_concat: list) -> ArrayLike: + """ + Alternative for concat_compat but specialized for use in the ArrayManager. + + Differences: only deals with 1D arrays (no axis keyword), assumes + ensure_wrapped_if_datetimelike and does not skip empty arrays to determine + the dtype. + In addition ensures that all NullArrayProxies get replaced with actual + arrays. 
+ + Parameters + ---------- + to_concat : list of arrays + + Returns + ------- + np.ndarray or ExtensionArray + """ + # ignore the all-NA proxies to determine the resulting dtype + to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)] + + dtypes = {x.dtype for x in to_concat_no_proxy} + single_dtype = len(dtypes) == 1 + + if single_dtype: + target_dtype = to_concat_no_proxy[0].dtype + elif all(lib.is_np_dtype(x, "iub") for x in dtypes): + # GH#42092 + target_dtype = np_find_common_type(*dtypes) + else: + target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy]) + + to_concat = [ + arr.to_array(target_dtype) + if isinstance(arr, NullArrayProxy) + else astype_array(arr, target_dtype, copy=False) + for arr in to_concat + ] + + if isinstance(to_concat[0], ExtensionArray): + cls = type(to_concat[0]) + return cls._concat_same_type(to_concat) + + result = np.concatenate(to_concat) + + # TODO decide on exact behaviour (we shouldn't do this only for empty result) + # see https://github.com/pandas-dev/pandas/issues/39817 + if len(result) == 0: + # all empties -> check for bool to not coerce to float + kinds = {obj.dtype.kind for obj in to_concat_no_proxy} + if len(kinds) != 1: + if "b" in kinds: + result = result.astype(object) + return result diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/base.py b/venv/lib/python3.10/site-packages/pandas/core/internals/base.py new file mode 100644 index 0000000000000000000000000000000000000000..ae91f167205a0628c4bcf9b61ce58e888fe6ec8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/internals/base.py @@ -0,0 +1,407 @@ +""" +Base class for the internal managers. Both BlockManager and ArrayManager +inherit from this class. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Literal, + cast, + final, +) + +import numpy as np + +from pandas._config import ( + using_copy_on_write, + warn_copy_on_write, +) + +from pandas._libs import ( + algos as libalgos, + lib, +) +from pandas.errors import AbstractMethodError +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.cast import ( + find_common_type, + np_can_hold_element, +) +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + SparseDtype, +) + +from pandas.core.base import PandasObject +from pandas.core.construction import extract_array +from pandas.core.indexes.api import ( + Index, + default_index, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + Self, + Shape, + ) + + +class _AlreadyWarned: + def __init__(self): + # This class is used on the manager level to the block level to + # ensure that we warn only once. The block method can update the + # warned_already option without returning a value to keep the + # interface consistent. This is only a temporary solution for + # CoW warnings. + self.warned_already = False + + +class DataManager(PandasObject): + # TODO share more methods/attributes + + axes: list[Index] + + @property + def items(self) -> Index: + raise AbstractMethodError(self) + + @final + def __len__(self) -> int: + return len(self.items) + + @property + def ndim(self) -> int: + return len(self.axes) + + @property + def shape(self) -> Shape: + return tuple(len(ax) for ax in self.axes) + + @final + def _validate_set_axis(self, axis: AxisInt, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. 
+ old_len = len(self.axes[axis]) + new_len = len(new_labels) + + if axis == 1 and len(self.items) == 0: + # If we are setting the index on a DataFrame with no columns, + # it is OK to change the length. + pass + + elif new_len != old_len: + raise ValueError( + f"Length mismatch: Expected axis has {old_len} elements, new " + f"values have {new_len} elements" + ) + + def reindex_indexer( + self, + new_axis, + indexer, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool = True, + only_slice: bool = False, + ) -> Self: + raise AbstractMethodError(self) + + @final + def reindex_axis( + self, + new_index: Index, + axis: AxisInt, + fill_value=None, + only_slice: bool = False, + ) -> Self: + """ + Conform data manager to new index. + """ + new_index, indexer = self.axes[axis].reindex(new_index) + + return self.reindex_indexer( + new_index, + indexer, + axis=axis, + fill_value=fill_value, + copy=False, + only_slice=only_slice, + ) + + def _equal_values(self, other: Self) -> bool: + """ + To be implemented by the subclasses. Only check the column values + assuming shape and indexes have already been checked. + """ + raise AbstractMethodError(self) + + @final + def equals(self, other: object) -> bool: + """ + Implementation for DataFrame.equals + """ + if not isinstance(other, type(self)): + return False + + self_axes, other_axes = self.axes, other.axes + if len(self_axes) != len(other_axes): + return False + if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): + return False + + return self._equal_values(other) + + def apply( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + raise AbstractMethodError(self) + + def apply_with_block( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + raise AbstractMethodError(self) + + @final + def isna(self, func) -> Self: + return self.apply("apply", func=func) + + @final + def fillna(self, value, limit: int | None, inplace: bool, downcast) -> Self: + if limit is not None: + # Do this validation even if we go through one of the no-op paths + limit = libalgos.validate_limit(None, limit=limit) + + return self.apply_with_block( + "fillna", + value=value, + limit=limit, + inplace=inplace, + downcast=downcast, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + @final + def where(self, other, cond, align: bool) -> Self: + if align: + align_keys = ["other", "cond"] + else: + align_keys = ["cond"] + other = extract_array(other, extract_numpy=True) + + return self.apply_with_block( + "where", + align_keys=align_keys, + other=other, + cond=cond, + using_cow=using_copy_on_write(), + ) + + @final + def putmask(self, mask, new, align: bool = True, warn: bool = True) -> Self: + if align: + align_keys = ["new", "mask"] + else: + align_keys = ["mask"] + new = extract_array(new, extract_numpy=True) + + already_warned = None + if warn_copy_on_write(): + already_warned = _AlreadyWarned() + if not warn: + already_warned.warned_already = True + + return self.apply_with_block( + "putmask", + align_keys=align_keys, + mask=mask, + new=new, + using_cow=using_copy_on_write(), + already_warned=already_warned, + ) + + @final + def round(self, decimals: int, using_cow: bool = False) -> Self: + return self.apply_with_block( + "round", + decimals=decimals, + using_cow=using_cow, + ) + + @final + def replace(self, to_replace, value, inplace: bool) -> Self: + inplace = validate_bool_kwarg(inplace, "inplace") + # NDFrame.replace ensures the not-is_list_likes here + assert 
not lib.is_list_like(to_replace) + assert not lib.is_list_like(value) + return self.apply_with_block( + "replace", + to_replace=to_replace, + value=value, + inplace=inplace, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + @final + def replace_regex(self, **kwargs) -> Self: + return self.apply_with_block( + "_replace_regex", + **kwargs, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + @final + def replace_list( + self, + src_list: list[Any], + dest_list: list[Any], + inplace: bool = False, + regex: bool = False, + ) -> Self: + """do a list replace""" + inplace = validate_bool_kwarg(inplace, "inplace") + + bm = self.apply_with_block( + "replace_list", + src_list=src_list, + dest_list=dest_list, + inplace=inplace, + regex=regex, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + bm._consolidate_inplace() + return bm + + def interpolate(self, inplace: bool, **kwargs) -> Self: + return self.apply_with_block( + "interpolate", + inplace=inplace, + **kwargs, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + def pad_or_backfill(self, inplace: bool, **kwargs) -> Self: + return self.apply_with_block( + "pad_or_backfill", + inplace=inplace, + **kwargs, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + def shift(self, periods: int, fill_value) -> Self: + if fill_value is lib.no_default: + fill_value = None + + return self.apply_with_block("shift", periods=periods, fill_value=fill_value) + + # -------------------------------------------------------------------- + # Consolidation: No-ops for all but BlockManager + + def is_consolidated(self) -> bool: + return True + + def consolidate(self) -> Self: + return self + + def _consolidate_inplace(self) -> None: + return + + +class SingleDataManager(DataManager): + @property + def ndim(self) -> Literal[1]: + return 1 + + @final + @property + def array(self) -> ArrayLike: + """ + Quick access to the backing array of the Block or SingleArrayManager. + """ + # error: "SingleDataManager" has no attribute "arrays"; maybe "array" + return self.arrays[0] # type: ignore[attr-defined] + + def setitem_inplace(self, indexer, value, warn: bool = True) -> None: + """ + Set values with indexer. + + For Single[Block/Array]Manager, this backs s[indexer] = value + + This is an inplace version of `setitem()`, mutating the manager/values + in place, not returning a new Manager (and Block), and thus never changing + the dtype. + """ + arr = self.array + + # EAs will do this validation in their own __setitem__ methods. + if isinstance(arr, np.ndarray): + # Note: checking for ndarray instead of np.dtype means we exclude + # dt64/td64, which do their own validation. + value = np_can_hold_element(arr.dtype, value) + + if isinstance(value, np.ndarray) and value.ndim == 1 and len(value) == 1: + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + value = value[0, ...] + + arr[indexer] = value + + def grouped_reduce(self, func): + arr = self.array + res = func(arr) + index = default_index(len(res)) + + mgr = type(self).from_array(res, index) + return mgr + + @classmethod + def from_array(cls, arr: ArrayLike, index: Index): + raise AbstractMethodError(cls) + + +def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None: + """ + Find the common dtype for `blocks`. + + Parameters + ---------- + blocks : List[DtypeObj] + + Returns + ------- + dtype : np.dtype, ExtensionDtype, or None + None is returned when `blocks` is empty. 
+ """ + if not len(dtypes): + return None + + return find_common_type(dtypes) + + +def ensure_np_dtype(dtype: DtypeObj) -> np.dtype: + # TODO: https://github.com/pandas-dev/pandas/issues/22791 + # Give EAs some input on what happens here. Sparse needs this. + if isinstance(dtype, SparseDtype): + dtype = dtype.subtype + dtype = cast(np.dtype, dtype) + elif isinstance(dtype, ExtensionDtype): + dtype = np.dtype("object") + elif dtype == np.dtype(str): + dtype = np.dtype("object") + return dtype diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/blocks.py b/venv/lib/python3.10/site-packages/pandas/core/internals/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..259e969112dd7506b97520f3f2a683cbf7f7ef5d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/internals/blocks.py @@ -0,0 +1,2850 @@ +from __future__ import annotations + +from functools import wraps +import inspect +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, + final, +) +import warnings +import weakref + +import numpy as np + +from pandas._config import ( + get_option, + using_copy_on_write, + warn_copy_on_write, +) + +from pandas._libs import ( + NaT, + internals as libinternals, + lib, +) +from pandas._libs.internals import ( + BlockPlacement, + BlockValuesRefs, +) +from pandas._libs.missing import NA +from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeBackend, + DtypeObj, + F, + FillnaOptions, + IgnoreRaise, + InterpolateOptions, + QuantileInterpolation, + Self, + Shape, + npt, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.astype import ( + astype_array_safe, + astype_is_view, +) +from pandas.core.dtypes.cast import ( + LossySetitemError, + can_hold_element, + convert_dtypes, + find_result_type, + maybe_downcast_to_dtype, + np_can_hold_element, +) +from pandas.core.dtypes.common import ( + is_1d_only_ea_dtype, + is_float_dtype, + is_integer_dtype, + is_list_like, + is_scalar, + is_string_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + NumpyEADtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCNumpyExtensionArray, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + na_value_for_dtype, +) + +from pandas.core import missing +import pandas.core.algorithms as algos +from pandas.core.array_algos.putmask import ( + extract_bool_array, + putmask_inplace, + putmask_without_repeat, + setitem_datetimelike_compat, + validate_putmask, +) +from pandas.core.array_algos.quantile import quantile_compat +from pandas.core.array_algos.replace import ( + compare_or_regex_search, + replace_regex, + should_use_regex, +) +from pandas.core.array_algos.transforms import shift +from pandas.core.arrays import ( + Categorical, + DatetimeArray, + ExtensionArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.computation import expressions +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import check_setitem_lengths +from pandas.core.indexes.base import get_values_for_csv + +if TYPE_CHECKING: + from collections.abc import ( 
+ Iterable, + Sequence, + ) + + from pandas.core.api import Index + from pandas.core.arrays._mixins import NDArrayBackedExtensionArray + +# comparison is faster than is_object_dtype +_dtype_obj = np.dtype("object") + + +COW_WARNING_GENERAL_MSG = """\ +Setting a value on a view: behaviour will change in pandas 3.0. +You are mutating a Series or DataFrame object, and currently this mutation will +also have effect on other Series or DataFrame objects that share data with this +object. In pandas 3.0 (with Copy-on-Write), updating one Series or DataFrame object +will never modify another. +""" + + +COW_WARNING_SETITEM_MSG = """\ +Setting a value on a view: behaviour will change in pandas 3.0. +Currently, the mutation will also have effect on the object that shares data +with this object. For example, when setting a value in a Series that was +extracted from a column of a DataFrame, that DataFrame will also be updated: + + ser = df["col"] + ser[0] = 0 <--- in pandas 2, this also updates `df` + +In pandas 3.0 (with Copy-on-Write), updating one Series/DataFrame will never +modify another, and thus in the example above, `df` will not be changed. +""" + + +def maybe_split(meth: F) -> F: + """ + If we have a multi-column block, split and operate block-wise. Otherwise + use the original method. + """ + + @wraps(meth) + def newfunc(self, *args, **kwargs) -> list[Block]: + if self.ndim == 1 or self.shape[0] == 1: + return meth(self, *args, **kwargs) + else: + # Split and operate column-by-column + return self.split_and_operate(meth, *args, **kwargs) + + return cast(F, newfunc) + + +class Block(PandasObject, libinternals.Block): + """ + Canonical n-dimensional unit of homogeneous dtype contained in a pandas + data structure + + Index-ignorant; let the container take care of that + """ + + values: np.ndarray | ExtensionArray + ndim: int + refs: BlockValuesRefs + __init__: Callable + + __slots__ = () + is_numeric = False + + @final + @cache_readonly + def _validate_ndim(self) -> bool: + """ + We validate dimension for blocks that can hold 2D values, which for now + means numpy dtypes or DatetimeTZDtype. + """ + dtype = self.dtype + return not isinstance(dtype, ExtensionDtype) or isinstance( + dtype, DatetimeTZDtype + ) + + @final + @cache_readonly + def is_object(self) -> bool: + return self.values.dtype == _dtype_obj + + @final + @cache_readonly + def is_extension(self) -> bool: + return not lib.is_np_dtype(self.values.dtype) + + @final + @cache_readonly + def _can_consolidate(self) -> bool: + # We _could_ consolidate for DatetimeTZDtype but don't for now. + return not self.is_extension + + @final + @cache_readonly + def _consolidate_key(self): + return self._can_consolidate, self.dtype.name + + @final + @cache_readonly + def _can_hold_na(self) -> bool: + """ + Can we store NA values in this Block? + """ + dtype = self.dtype + if isinstance(dtype, np.dtype): + return dtype.kind not in "iub" + return dtype._can_hold_na + + @final + @property + def is_bool(self) -> bool: + """ + We can be bool if a) we are bool dtype or b) object dtype with bool objects. 
+ """ + return self.values.dtype == np.dtype(bool) + + @final + def external_values(self): + return external_values(self.values) + + @final + @cache_readonly + def fill_value(self): + # Used in reindex_indexer + return na_value_for_dtype(self.dtype, compat=False) + + @final + def _standardize_fill_value(self, value): + # if we are passed a scalar None, convert it here + if self.dtype != _dtype_obj and is_valid_na_for_dtype(value, self.dtype): + value = self.fill_value + return value + + @property + def mgr_locs(self) -> BlockPlacement: + return self._mgr_locs + + @mgr_locs.setter + def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None: + self._mgr_locs = new_mgr_locs + + @final + def make_block( + self, + values, + placement: BlockPlacement | None = None, + refs: BlockValuesRefs | None = None, + ) -> Block: + """ + Create a new block, with type inference propagate any values that are + not specified + """ + if placement is None: + placement = self._mgr_locs + if self.is_extension: + values = ensure_block_shape(values, ndim=self.ndim) + + return new_block(values, placement=placement, ndim=self.ndim, refs=refs) + + @final + def make_block_same_class( + self, + values, + placement: BlockPlacement | None = None, + refs: BlockValuesRefs | None = None, + ) -> Self: + """Wrap given values in a block of same type as self.""" + # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet + # relied on it, as of 2.0 the caller is responsible for this. + if placement is None: + placement = self._mgr_locs + + # We assume maybe_coerce_values has already been called + return type(self)(values, placement=placement, ndim=self.ndim, refs=refs) + + @final + def __repr__(self) -> str: + # don't want to print out all of the items here + name = type(self).__name__ + if self.ndim == 1: + result = f"{name}: {len(self)} dtype: {self.dtype}" + else: + shape = " x ".join([str(s) for s in self.shape]) + result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}" + + return result + + @final + def __len__(self) -> int: + return len(self.values) + + @final + def slice_block_columns(self, slc: slice) -> Self: + """ + Perform __getitem__-like, return result as block. + """ + new_mgr_locs = self._mgr_locs[slc] + + new_values = self._slice(slc) + refs = self.refs + return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) + + @final + def take_block_columns(self, indices: npt.NDArray[np.intp]) -> Self: + """ + Perform __getitem__-like, return result as block. + + Only supports slices that preserve dimensionality. + """ + # Note: only called from is from internals.concat, and we can verify + # that never happens with 1-column blocks, i.e. never for ExtensionBlock. + + new_mgr_locs = self._mgr_locs[indices] + + new_values = self._slice(indices) + return type(self)(new_values, new_mgr_locs, self.ndim, refs=None) + + @final + def getitem_block_columns( + self, slicer: slice, new_mgr_locs: BlockPlacement, ref_inplace_op: bool = False + ) -> Self: + """ + Perform __getitem__-like, return result as block. + + Only supports slices that preserve dimensionality. 
+ """ + new_values = self._slice(slicer) + refs = self.refs if not ref_inplace_op or self.refs.has_reference() else None + return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) + + @final + def _can_hold_element(self, element: Any) -> bool: + """require the same dtype as ourselves""" + element = extract_array(element, extract_numpy=True) + return can_hold_element(self.values, element) + + @final + def should_store(self, value: ArrayLike) -> bool: + """ + Should we set self.values[indexer] = value inplace or do we need to cast? + + Parameters + ---------- + value : np.ndarray or ExtensionArray + + Returns + ------- + bool + """ + return value.dtype == self.dtype + + # --------------------------------------------------------------------- + # Apply/Reduce and Helpers + + @final + def apply(self, func, **kwargs) -> list[Block]: + """ + apply the function to my values; return a block if we are not + one + """ + result = func(self.values, **kwargs) + + result = maybe_coerce_values(result) + return self._split_op_result(result) + + @final + def reduce(self, func) -> list[Block]: + # We will apply the function and reshape the result into a single-row + # Block with the same mgr_locs; squeezing will be done at a higher level + assert self.ndim == 2 + + result = func(self.values) + + if self.values.ndim == 1: + res_values = result + else: + res_values = result.reshape(-1, 1) + + nb = self.make_block(res_values) + return [nb] + + @final + def _split_op_result(self, result: ArrayLike) -> list[Block]: + # See also: split_and_operate + if result.ndim > 1 and isinstance(result.dtype, ExtensionDtype): + # TODO(EA2D): unnecessary with 2D EAs + # if we get a 2D ExtensionArray, we need to split it into 1D pieces + nbs = [] + for i, loc in enumerate(self._mgr_locs): + if not is_1d_only_ea_dtype(result.dtype): + vals = result[i : i + 1] + else: + vals = result[i] + + bp = BlockPlacement(loc) + block = self.make_block(values=vals, placement=bp) + nbs.append(block) + return nbs + + nb = self.make_block(result) + + return [nb] + + @final + def _split(self) -> list[Block]: + """ + Split a block into a list of single-column blocks. + """ + assert self.ndim == 2 + + new_blocks = [] + for i, ref_loc in enumerate(self._mgr_locs): + vals = self.values[slice(i, i + 1)] + + bp = BlockPlacement(ref_loc) + nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs) + new_blocks.append(nb) + return new_blocks + + @final + def split_and_operate(self, func, *args, **kwargs) -> list[Block]: + """ + Split the block and apply func column-by-column. 
+ + Parameters + ---------- + func : Block method + *args + **kwargs + + Returns + ------- + List[Block] + """ + assert self.ndim == 2 and self.shape[0] != 1 + + res_blocks = [] + for nb in self._split(): + rbs = func(nb, *args, **kwargs) + res_blocks.extend(rbs) + return res_blocks + + # --------------------------------------------------------------------- + # Up/Down-casting + + @final + def coerce_to_target_dtype(self, other, warn_on_upcast: bool = False) -> Block: + """ + coerce the current block to a dtype compat for other + we will return a block, possibly object, and not raise + + we can also safely try to coerce to the same dtype + and will receive the same block + """ + new_dtype = find_result_type(self.values.dtype, other) + if new_dtype == self.dtype: + # GH#52927 avoid RecursionError + raise AssertionError( + "Something has gone wrong, please report a bug at " + "https://github.com/pandas-dev/pandas/issues" + ) + + # In a future version of pandas, the default will be that + # setting `nan` into an integer series won't raise. + if ( + is_scalar(other) + and is_integer_dtype(self.values.dtype) + and isna(other) + and other is not NaT + and not ( + isinstance(other, (np.datetime64, np.timedelta64)) and np.isnat(other) + ) + ): + warn_on_upcast = False + elif ( + isinstance(other, np.ndarray) + and other.ndim == 1 + and is_integer_dtype(self.values.dtype) + and is_float_dtype(other.dtype) + and lib.has_only_ints_or_nan(other) + ): + warn_on_upcast = False + + if warn_on_upcast: + warnings.warn( + f"Setting an item of incompatible dtype is deprecated " + "and will raise an error in a future version of pandas. " + f"Value '{other}' has dtype incompatible with {self.values.dtype}, " + "please explicitly cast to a compatible dtype first.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if self.values.dtype == new_dtype: + raise AssertionError( + f"Did not expect new dtype {new_dtype} to equal self.dtype " + f"{self.values.dtype}. Please report a bug at " + "https://github.com/pandas-dev/pandas/issues." + ) + return self.astype(new_dtype, copy=False) + + @final + def _maybe_downcast( + self, + blocks: list[Block], + downcast, + using_cow: bool, + caller: str, + ) -> list[Block]: + if downcast is False: + return blocks + + if self.dtype == _dtype_obj: + # TODO: does it matter that self.dtype might not match blocks[i].dtype? + # GH#44241 We downcast regardless of the argument; + # respecting 'downcast=None' may be worthwhile at some point, + # but ATM it breaks too much existing code. + # split and convert the blocks + + if caller == "fillna" and get_option("future.no_silent_downcasting"): + return blocks + + nbs = extend_blocks( + [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks] + ) + if caller == "fillna": + if len(nbs) != len(blocks) or not all( + x.dtype == y.dtype for x, y in zip(nbs, blocks) + ): + # GH#54261 + warnings.warn( + "Downcasting object dtype arrays on .fillna, .ffill, .bfill " + "is deprecated and will change in a future version. " + "Call result.infer_objects(copy=False) instead. 
" + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return nbs + + elif downcast is None: + return blocks + elif caller == "where" and get_option("future.no_silent_downcasting") is True: + return blocks + else: + nbs = extend_blocks([b._downcast_2d(downcast, using_cow) for b in blocks]) + + # When _maybe_downcast is called with caller="where", it is either + # a) with downcast=False, which is a no-op (the desired future behavior) + # b) with downcast="infer", which is _not_ passed by the user. + # In the latter case the future behavior is to stop doing inference, + # so we issue a warning if and only if some inference occurred. + if caller == "where": + # GH#53656 + if len(blocks) != len(nbs) or any( + left.dtype != right.dtype for left, right in zip(blocks, nbs) + ): + # In this case _maybe_downcast was _not_ a no-op, so the behavior + # will change, so we issue a warning. + warnings.warn( + "Downcasting behavior in Series and DataFrame methods 'where', " + "'mask', and 'clip' is deprecated. In a future " + "version this will not infer object dtypes or cast all-round " + "floats to integers. Instead call " + "result.infer_objects(copy=False) for object inference, " + "or cast round floats explicitly. To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return nbs + + @final + @maybe_split + def _downcast_2d(self, dtype, using_cow: bool = False) -> list[Block]: + """ + downcast specialized to 2D case post-validation. + + Refactored to allow use of maybe_split. + """ + new_values = maybe_downcast_to_dtype(self.values, dtype=dtype) + new_values = maybe_coerce_values(new_values) + refs = self.refs if new_values is self.values else None + return [self.make_block(new_values, refs=refs)] + + @final + def convert( + self, + *, + copy: bool = True, + using_cow: bool = False, + ) -> list[Block]: + """ + Attempt to coerce any object types to better types. Return a copy + of the block (if copy = True). 
+ """ + if not self.is_object: + if not copy and using_cow: + return [self.copy(deep=False)] + return [self.copy()] if copy else [self] + + if self.ndim != 1 and self.shape[0] != 1: + blocks = self.split_and_operate( + Block.convert, copy=copy, using_cow=using_cow + ) + if all(blk.dtype.kind == "O" for blk in blocks): + # Avoid fragmenting the block if convert is a no-op + if using_cow: + return [self.copy(deep=False)] + return [self.copy()] if copy else [self] + return blocks + + values = self.values + if values.ndim == 2: + # the check above ensures we only get here with values.shape[0] == 1, + # avoid doing .ravel as that might make a copy + values = values[0] + + res_values = lib.maybe_convert_objects( + values, # type: ignore[arg-type] + convert_non_numeric=True, + ) + refs = None + if copy and res_values is values: + res_values = values.copy() + elif res_values is values: + refs = self.refs + + res_values = ensure_block_shape(res_values, self.ndim) + res_values = maybe_coerce_values(res_values) + return [self.make_block(res_values, refs=refs)] + + def convert_dtypes( + self, + copy: bool, + using_cow: bool, + infer_objects: bool = True, + convert_string: bool = True, + convert_integer: bool = True, + convert_boolean: bool = True, + convert_floating: bool = True, + dtype_backend: DtypeBackend = "numpy_nullable", + ) -> list[Block]: + if infer_objects and self.is_object: + blks = self.convert(copy=False, using_cow=using_cow) + else: + blks = [self] + + if not any( + [convert_floating, convert_integer, convert_boolean, convert_string] + ): + return [b.copy(deep=copy) for b in blks] + + rbs = [] + for blk in blks: + # Determine dtype column by column + sub_blks = [blk] if blk.ndim == 1 or self.shape[0] == 1 else blk._split() + dtypes = [ + convert_dtypes( + b.values, + convert_string, + convert_integer, + convert_boolean, + convert_floating, + infer_objects, + dtype_backend, + ) + for b in sub_blks + ] + if all(dtype == self.dtype for dtype in dtypes): + # Avoid block splitting if no dtype changes + rbs.append(blk.copy(deep=copy)) + continue + + for dtype, b in zip(dtypes, sub_blks): + rbs.append(b.astype(dtype=dtype, copy=copy, squeeze=b.ndim != 1)) + return rbs + + # --------------------------------------------------------------------- + # Array-Like Methods + + @final + @cache_readonly + def dtype(self) -> DtypeObj: + return self.values.dtype + + @final + def astype( + self, + dtype: DtypeObj, + copy: bool = False, + errors: IgnoreRaise = "raise", + using_cow: bool = False, + squeeze: bool = False, + ) -> Block: + """ + Coerce to the new dtype. + + Parameters + ---------- + dtype : np.dtype or ExtensionDtype + copy : bool, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + using_cow: bool, default False + Signaling if copy on write copy logic is used. 
+ squeeze : bool, default False + squeeze values to ndim=1 if only one column is given + + Returns + ------- + Block + """ + values = self.values + if squeeze and values.ndim == 2 and is_1d_only_ea_dtype(dtype): + if values.shape[0] != 1: + raise ValueError("Can not squeeze with more than one column.") + values = values[0, :] # type: ignore[call-overload] + + new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) + + new_values = maybe_coerce_values(new_values) + + refs = None + if (using_cow or not copy) and astype_is_view(values.dtype, new_values.dtype): + refs = self.refs + + newb = self.make_block(new_values, refs=refs) + if newb.shape != self.shape: + raise TypeError( + f"cannot set astype for copy = [{copy}] for dtype " + f"({self.dtype.name} [{self.shape}]) to different shape " + f"({newb.dtype.name} [{newb.shape}])" + ) + return newb + + @final + def get_values_for_csv( + self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None + ) -> Block: + """convert to our native types format""" + result = get_values_for_csv( + self.values, + na_rep=na_rep, + quoting=quoting, + float_format=float_format, + date_format=date_format, + decimal=decimal, + ) + return self.make_block(result) + + @final + def copy(self, deep: bool = True) -> Self: + """copy constructor""" + values = self.values + refs: BlockValuesRefs | None + if deep: + values = values.copy() + refs = None + else: + refs = self.refs + return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs) + + # --------------------------------------------------------------------- + # Copy-on-Write Helpers + + @final + def _maybe_copy(self, using_cow: bool, inplace: bool) -> Self: + if using_cow and inplace: + deep = self.refs.has_reference() + blk = self.copy(deep=deep) + else: + blk = self if inplace else self.copy() + return blk + + @final + def _get_refs_and_copy(self, using_cow: bool, inplace: bool): + refs = None + copy = not inplace + if inplace: + if using_cow and self.refs.has_reference(): + copy = True + else: + refs = self.refs + return copy, refs + + # --------------------------------------------------------------------- + # Replace + + @final + def replace( + self, + to_replace, + value, + inplace: bool = False, + # mask may be pre-computed if we're called from replace_list + mask: npt.NDArray[np.bool_] | None = None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + """ + replace the to_replace value with value, possible to create new + blocks here this is just a call to putmask. + """ + + # Note: the checks we do in NDFrame.replace ensure we never get + # here with listlike to_replace or value, as those cases + # go through replace_list + values = self.values + + if isinstance(values, Categorical): + # TODO: avoid special-casing + # GH49404 + blk = self._maybe_copy(using_cow, inplace) + values = cast(Categorical, blk.values) + values._replace(to_replace=to_replace, value=value, inplace=True) + return [blk] + + if not self._can_hold_element(to_replace): + # We cannot hold `to_replace`, so we know immediately that + # replacing it is a no-op. + # Note: If to_replace were a list, NDFrame.replace would call + # replace_list instead of replace. + if using_cow: + return [self.copy(deep=False)] + else: + return [self] if inplace else [self.copy()] + + if mask is None: + mask = missing.mask_missing(values, to_replace) + if not mask.any(): + # Note: we get here with test_replace_extension_other incorrectly + # bc _can_hold_element is incorrect. 
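+ # Illustrative note (hypothetical values, not from the test above): an
+ # int64 block holding [1, 2, 3] asked to replace 99 produces an all-False
+ # mask, so we fall through to the no-op return below -- a shallow,
+ # data-sharing copy under Copy-on-Write, otherwise self / self.copy().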
+ if using_cow: + return [self.copy(deep=False)] + else: + return [self] if inplace else [self.copy()] + + elif self._can_hold_element(value): + # TODO(CoW): Maybe split here as well into columns where mask has True + # and rest? + blk = self._maybe_copy(using_cow, inplace) + putmask_inplace(blk.values, mask, value) + if ( + inplace + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + if not (self.is_object and value is None): + # if the user *explicitly* gave None, we keep None, otherwise + # may downcast to NaN + if get_option("future.no_silent_downcasting") is True: + blocks = [blk] + else: + blocks = blk.convert(copy=False, using_cow=using_cow) + if len(blocks) > 1 or blocks[0].dtype != blk.dtype: + warnings.warn( + # GH#54710 + "Downcasting behavior in `replace` is deprecated and " + "will be removed in a future version. To retain the old " + "behavior, explicitly call " + "`result.infer_objects(copy=False)`. " + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + blocks = [blk] + return blocks + + elif self.ndim == 1 or self.shape[0] == 1: + if value is None or value is NA: + blk = self.astype(np.dtype(object)) + else: + blk = self.coerce_to_target_dtype(value) + return blk.replace( + to_replace=to_replace, + value=value, + inplace=True, + mask=mask, + ) + + else: + # split so that we only upcast where necessary + blocks = [] + for i, nb in enumerate(self._split()): + blocks.extend( + type(self).replace( + nb, + to_replace=to_replace, + value=value, + inplace=True, + mask=mask[i : i + 1], + using_cow=using_cow, + ) + ) + return blocks + + @final + def _replace_regex( + self, + to_replace, + value, + inplace: bool = False, + mask=None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + """ + Replace elements by the given value. + + Parameters + ---------- + to_replace : object or pattern + Scalar to replace or regular expression to match. + value : object + Replacement object. + inplace : bool, default False + Perform inplace modification. + mask : array-like of bool, optional + True indicate corresponding element is ignored. + using_cow: bool, default False + Specifying if copy on write is enabled. + + Returns + ------- + List[Block] + """ + if not self._can_hold_element(to_replace): + # i.e. only if self.is_object is True, but could in principle include a + # String ExtensionBlock + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + rx = re.compile(to_replace) + + block = self._maybe_copy(using_cow, inplace) + + replace_regex(block.values, rx, value, mask) + + if ( + inplace + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + nbs = block.convert(copy=False, using_cow=using_cow) + opt = get_option("future.no_silent_downcasting") + if (len(nbs) > 1 or nbs[0].dtype != block.dtype) and not opt: + warnings.warn( + # GH#54710 + "Downcasting behavior in `replace` is deprecated and " + "will be removed in a future version. 
To retain the old " + "behavior, explicitly call `result.infer_objects(copy=False)`. " + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + return nbs + + @final + def replace_list( + self, + src_list: Iterable[Any], + dest_list: Sequence[Any], + inplace: bool = False, + regex: bool = False, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + """ + See BlockManager.replace_list docstring. + """ + values = self.values + + if isinstance(values, Categorical): + # TODO: avoid special-casing + # GH49404 + blk = self._maybe_copy(using_cow, inplace) + values = cast(Categorical, blk.values) + values._replace(to_replace=src_list, value=dest_list, inplace=True) + return [blk] + + # Exclude anything that we know we won't contain + pairs = [ + (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) + ] + if not len(pairs): + if using_cow: + return [self.copy(deep=False)] + # shortcut, nothing to replace + return [self] if inplace else [self.copy()] + + src_len = len(pairs) - 1 + + if is_string_dtype(values.dtype): + # Calculate the mask once, prior to the call of comp + # in order to avoid repeating the same computations + na_mask = ~isna(values) + masks: Iterable[npt.NDArray[np.bool_]] = ( + extract_bool_array( + cast( + ArrayLike, + compare_or_regex_search( + values, s[0], regex=regex, mask=na_mask + ), + ) + ) + for s in pairs + ) + else: + # GH#38086 faster if we know we dont need to check for regex + masks = (missing.mask_missing(values, s[0]) for s in pairs) + # Materialize if inplace = True, since the masks can change + # as we replace + if inplace: + masks = list(masks) + + if using_cow: + # Don't set up refs here, otherwise we will think that we have + # references when we check again later + rb = [self] + else: + rb = [self if inplace else self.copy()] + + if ( + inplace + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + opt = get_option("future.no_silent_downcasting") + for i, ((src, dest), mask) in enumerate(zip(pairs, masks)): + convert = i == src_len # only convert once at the end + new_rb: list[Block] = [] + + # GH-39338: _replace_coerce can split a block into + # single-column blocks, so track the index so we know + # where to index into the mask + for blk_num, blk in enumerate(rb): + if len(rb) == 1: + m = mask + else: + mib = mask + assert not isinstance(mib, bool) + m = mib[blk_num : blk_num + 1] + + # error: Argument "mask" to "_replace_coerce" of "Block" has + # incompatible type "Union[ExtensionArray, ndarray[Any, Any], bool]"; + # expected "ndarray[Any, dtype[bool_]]" + result = blk._replace_coerce( + to_replace=src, + value=dest, + mask=m, + inplace=inplace, + regex=regex, + using_cow=using_cow, + ) + + if using_cow and i != src_len: + # This is ugly, but we have to get rid of intermediate refs + # that did not go out of scope yet, otherwise we will trigger + # many unnecessary copies + for b in result: + ref = weakref.ref(b) + b.refs.referenced_blocks.pop( + b.refs.referenced_blocks.index(ref) + ) + + if ( + not opt + and convert + and blk.is_object + and not all(x is None for x in dest_list) + ): + # GH#44498 avoid unwanted cast-back + nbs = [] + for res_blk in result: + converted = res_blk.convert( + 
copy=True and not using_cow, using_cow=using_cow + ) + if len(converted) > 1 or converted[0].dtype != res_blk.dtype: + warnings.warn( + # GH#54710 + "Downcasting behavior in `replace` is deprecated " + "and will be removed in a future version. To " + "retain the old behavior, explicitly call " + "`result.infer_objects(copy=False)`. " + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + nbs.extend(converted) + result = nbs + new_rb.extend(result) + rb = new_rb + return rb + + @final + def _replace_coerce( + self, + to_replace, + value, + mask: npt.NDArray[np.bool_], + inplace: bool = True, + regex: bool = False, + using_cow: bool = False, + ) -> list[Block]: + """ + Replace value corresponding to the given boolean array with another + value. + + Parameters + ---------- + to_replace : object or pattern + Scalar to replace or regular expression to match. + value : object + Replacement object. + mask : np.ndarray[bool] + True indicate corresponding element is ignored. + inplace : bool, default True + Perform inplace modification. + regex : bool, default False + If true, perform regular expression substitution. + + Returns + ------- + List[Block] + """ + if should_use_regex(regex, to_replace): + return self._replace_regex( + to_replace, + value, + inplace=inplace, + mask=mask, + ) + else: + if value is None: + # gh-45601, gh-45836, gh-46634 + if mask.any(): + has_ref = self.refs.has_reference() + nb = self.astype(np.dtype(object), copy=False, using_cow=using_cow) + if (nb is self or using_cow) and not inplace: + nb = nb.copy() + elif inplace and has_ref and nb.refs.has_reference() and using_cow: + # no copy in astype and we had refs before + nb = nb.copy() + putmask_inplace(nb.values, mask, value) + return [nb] + if using_cow: + return [self] + return [self] if inplace else [self.copy()] + return self.replace( + to_replace=to_replace, + value=value, + inplace=inplace, + mask=mask, + using_cow=using_cow, + ) + + # --------------------------------------------------------------------- + # 2D Methods - Shared by NumpyBlock and NDArrayBackedExtensionBlock + # but not ExtensionBlock + + def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray: + """ + For compatibility with 1D-only ExtensionArrays. + """ + return arg + + def _unwrap_setitem_indexer(self, indexer): + """ + For compatibility with 1D-only ExtensionArrays. + """ + return indexer + + # NB: this cannot be made cache_readonly because in mgr.set_values we pin + # new .values that can have different shape GH#42631 + @property + def shape(self) -> Shape: + return self.values.shape + + def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray: + # In the case where we have a tuple[slice, int], the slice will always + # be slice(None) + # Note: only reached with self.ndim == 2 + # Invalid index type "Union[int, Tuple[int, int], Tuple[slice, int]]" + # for "Union[ndarray[Any, Any], ExtensionArray]"; expected type + # "Union[int, integer[Any]]" + return self.values[i] # type: ignore[index] + + def _slice( + self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] + ) -> ArrayLike: + """return a slice of my values""" + + return self.values[slicer] + + def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: + """ + Modify block values in-place with new item value. + + If copy=True, first copy the underlying values in place before modifying + (for Copy-on-Write). 
+ + Notes + ----- + `set_inplace` never creates a new array or new Block, whereas `setitem` + _may_ create a new array and always creates a new Block. + + Caller is responsible for checking values.dtype == self.dtype. + """ + if copy: + self.values = self.values.copy() + self.values[locs] = values + + @final + def take_nd( + self, + indexer: npt.NDArray[np.intp], + axis: AxisInt, + new_mgr_locs: BlockPlacement | None = None, + fill_value=lib.no_default, + ) -> Block: + """ + Take values according to indexer and return them as a block. + """ + values = self.values + + if fill_value is lib.no_default: + fill_value = self.fill_value + allow_fill = False + else: + allow_fill = True + + # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype + new_values = algos.take_nd( + values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value + ) + + # Called from three places in managers, all of which satisfy + # these assertions + if isinstance(self, ExtensionBlock): + # NB: in this case, the 'axis' kwarg will be ignored in the + # algos.take_nd call above. + assert not (self.ndim == 1 and new_mgr_locs is None) + assert not (axis == 0 and new_mgr_locs is None) + + if new_mgr_locs is None: + new_mgr_locs = self._mgr_locs + + if new_values.dtype != self.dtype: + return self.make_block(new_values, new_mgr_locs) + else: + return self.make_block_same_class(new_values, new_mgr_locs) + + def _unstack( + self, + unstacker, + fill_value, + new_placement: npt.NDArray[np.intp], + needs_masking: npt.NDArray[np.bool_], + ): + """ + Return a list of unstacked blocks of self + + Parameters + ---------- + unstacker : reshape._Unstacker + fill_value : int + Only used in ExtensionBlock._unstack + new_placement : np.ndarray[np.intp] + allow_fill : bool + needs_masking : np.ndarray[bool] + + Returns + ------- + blocks : list of Block + New blocks of unstacked values. + mask : array-like of bool + The mask of columns of `blocks` we should keep. + """ + new_values, mask = unstacker.get_new_values( + self.values.T, fill_value=fill_value + ) + + mask = mask.any(0) + # TODO: in all tests we have mask.all(); can we rely on that? + + # Note: these next two lines ensure that + # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) + # which the calling function needs in order to pass verify_integrity=False + # to the BlockManager constructor + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + bp = BlockPlacement(new_placement) + blocks = [new_block_2d(new_values, placement=bp)] + return blocks, mask + + # --------------------------------------------------------------------- + + def setitem(self, indexer, value, using_cow: bool = False) -> Block: + """ + Attempt self.values[indexer] = value, possibly creating a new array. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice, int + The subset of self.values to set + value : object + The value being set + using_cow: bool, default False + Signaling if CoW is used. + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. 
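+
+ For illustration (hypothetical values): writing 5 into an int64 block
+ stores it in place, whereas 1.5 cannot be held losslessly, so
+ `coerce_to_target_dtype` upcasts and setitem returns a new float64 block.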
+ """ + + value = self._standardize_fill_value(value) + + values = cast(np.ndarray, self.values) + if self.ndim == 2: + values = values.T + + # length checking + check_setitem_lengths(indexer, value, values) + + if self.dtype != _dtype_obj: + # GH48933: extract_array would convert a pd.Series value to np.ndarray + value = extract_array(value, extract_numpy=True) + try: + casted = np_can_hold_element(values.dtype, value) + except LossySetitemError: + # current dtype cannot store value, coerce to common dtype + nb = self.coerce_to_target_dtype(value, warn_on_upcast=True) + return nb.setitem(indexer, value) + else: + if self.dtype == _dtype_obj: + # TODO: avoid having to construct values[indexer] + vi = values[indexer] + if lib.is_list_like(vi): + # checking lib.is_scalar here fails on + # test_iloc_setitem_custom_object + casted = setitem_datetimelike_compat(values, len(vi), casted) + + self = self._maybe_copy(using_cow, inplace=True) + values = cast(np.ndarray, self.values.T) + if isinstance(casted, np.ndarray) and casted.ndim == 1 and len(casted) == 1: + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + casted = casted[0, ...] + try: + values[indexer] = casted + except (TypeError, ValueError) as err: + if is_list_like(casted): + raise ValueError( + "setting an array element with a sequence." + ) from err + raise + return self + + def putmask( + self, mask, new, using_cow: bool = False, already_warned=None + ) -> list[Block]: + """ + putmask the data to the block; it is possible that we may create a + new dtype of block + + Return the resulting block(s). + + Parameters + ---------- + mask : np.ndarray[bool], SparseArray[bool], or BooleanArray + new : a ndarray/object + using_cow: bool, default False + + Returns + ------- + List[Block] + """ + orig_mask = mask + values = cast(np.ndarray, self.values) + mask, noop = validate_putmask(values.T, mask) + assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) + + if new is lib.no_default: + new = self.fill_value + + new = self._standardize_fill_value(new) + new = extract_array(new, extract_numpy=True) + + if noop: + if using_cow: + return [self.copy(deep=False)] + return [self] + + if ( + warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + try: + casted = np_can_hold_element(values.dtype, new) + + self = self._maybe_copy(using_cow, inplace=True) + values = cast(np.ndarray, self.values) + + putmask_without_repeat(values.T, mask, casted) + return [self] + except LossySetitemError: + if self.ndim == 1 or self.shape[0] == 1: + # no need to split columns + + if not is_list_like(new): + # using just new[indexer] can't save us the need to cast + return self.coerce_to_target_dtype( + new, warn_on_upcast=True + ).putmask(mask, new) + else: + indexer = mask.nonzero()[0] + nb = self.setitem(indexer, new[indexer], using_cow=using_cow) + return [nb] + + else: + is_array = isinstance(new, np.ndarray) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + n = new + if is_array: + # we have a different value per-column + n = new[:, i : i + 1] + + submask = orig_mask[:, i : i + 1] + rbs = nb.putmask(submask, n, using_cow=using_cow) + res_blocks.extend(rbs) + return res_blocks + + def where( + self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False + ) -> list[Block]: + """ + 
evaluate the block; return result block(s) from the result + + Parameters + ---------- + other : a ndarray/object + cond : np.ndarray[bool], SparseArray[bool], or BooleanArray + _downcast : str or None, default "infer" + Private because we only specify it when calling from fillna. + + Returns + ------- + List[Block] + """ + assert cond.ndim == self.ndim + assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) + + transpose = self.ndim == 2 + + cond = extract_bool_array(cond) + + # EABlocks override where + values = cast(np.ndarray, self.values) + orig_other = other + if transpose: + values = values.T + + icond, noop = validate_putmask(values, ~cond) + if noop: + # GH-39595: Always return a copy; short-circuit up/downcasting + if using_cow: + return [self.copy(deep=False)] + return [self.copy()] + + if other is lib.no_default: + other = self.fill_value + + other = self._standardize_fill_value(other) + + try: + # try/except here is equivalent to a self._can_hold_element check, + # but this gets us back 'casted' which we will reuse below; + # without using 'casted', expressions.where may do unwanted upcasts. + casted = np_can_hold_element(values.dtype, other) + except (ValueError, TypeError, LossySetitemError): + # we cannot coerce, return a compat dtype + + if self.ndim == 1 or self.shape[0] == 1: + # no need to split columns + + block = self.coerce_to_target_dtype(other) + blocks = block.where(orig_other, cond, using_cow=using_cow) + return self._maybe_downcast( + blocks, downcast=_downcast, using_cow=using_cow, caller="where" + ) + + else: + # since _maybe_downcast would split blocks anyway, we + # can avoid some potential upcast/downcast by splitting + # on the front end. + is_array = isinstance(other, (np.ndarray, ExtensionArray)) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + oth = other + if is_array: + # we have a different value per-column + oth = other[:, i : i + 1] + + submask = cond[:, i : i + 1] + rbs = nb.where( + oth, submask, _downcast=_downcast, using_cow=using_cow + ) + res_blocks.extend(rbs) + return res_blocks + + else: + other = casted + alt = setitem_datetimelike_compat(values, icond.sum(), other) + if alt is not other: + if is_list_like(other) and len(other) < len(values): + # call np.where with other to get the appropriate ValueError + np.where(~icond, values, other) + raise NotImplementedError( + "This should not be reached; call to np.where above is " + "expected to raise ValueError. Please report a bug at " + "github.com/pandas-dev/pandas" + ) + result = values.copy() + np.putmask(result, icond, alt) + else: + # By the time we get here, we should have all Series/Index + # args extracted to ndarray + if ( + is_list_like(other) + and not isinstance(other, np.ndarray) + and len(other) == self.shape[-1] + ): + # If we don't do this broadcasting here, then expressions.where + # will broadcast a 1D other to be row-like instead of + # column-like. + other = np.array(other).reshape(values.shape) + # If lengths don't match (or len(other)==1), we will raise + # inside expressions.where, see test_series_where + + # Note: expressions.where may upcast. + result = expressions.where(~icond, values, other) + # The np_can_hold_element check _should_ ensure that we always + # have result.dtype == self.dtype here. 
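+ # Illustrative outcome (hypothetical values): an int64 block holding
+ # [1, 2, 3] with cond [True, False, True] and other=0 gives [1, 0, 3]
+ # and stays int64; with other=1.5, np_can_hold_element above raises
+ # LossySetitemError instead and we re-dispatch on a float64 block via
+ # coerce_to_target_dtype.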
+ + if transpose: + result = result.T + + return [self.make_block(result)] + + def fillna( + self, + value, + limit: int | None = None, + inplace: bool = False, + downcast=None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + """ + fillna on the block with the value. If we fail, then convert to + block to hold objects instead and try again + """ + # Caller is responsible for validating limit; if int it is strictly positive + inplace = validate_bool_kwarg(inplace, "inplace") + + if not self._can_hold_na: + # can short-circuit the isna call + noop = True + else: + mask = isna(self.values) + mask, noop = validate_putmask(self.values, mask) + + if noop: + # we can't process the value, but nothing to do + if inplace: + if using_cow: + return [self.copy(deep=False)] + # Arbitrarily imposing the convention that we ignore downcast + # on no-op when inplace=True + return [self] + else: + # GH#45423 consistent downcasting on no-ops. + nb = self.copy(deep=not using_cow) + nbs = nb._maybe_downcast( + [nb], downcast=downcast, using_cow=using_cow, caller="fillna" + ) + return nbs + + if limit is not None: + mask[mask.cumsum(self.ndim - 1) > limit] = False + + if inplace: + nbs = self.putmask( + mask.T, value, using_cow=using_cow, already_warned=already_warned + ) + else: + # without _downcast, we would break + # test_fillna_dtype_conversion_equiv_replace + nbs = self.where(value, ~mask.T, _downcast=False) + + # Note: blk._maybe_downcast vs self._maybe_downcast(nbs) + # makes a difference bc blk may have object dtype, which has + # different behavior in _maybe_downcast. + return extend_blocks( + [ + blk._maybe_downcast( + [blk], downcast=downcast, using_cow=using_cow, caller="fillna" + ) + for blk in nbs + ] + ) + + def pad_or_backfill( + self, + *, + method: FillnaOptions, + axis: AxisInt = 0, + inplace: bool = False, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None = None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + if not self._can_hold_na: + # If there are no NAs, then interpolate is a no-op + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + # Dispatch to the NumpyExtensionArray method. 
+ # We know self.array_values is a NumpyExtensionArray bc EABlock overrides + vals = cast(NumpyExtensionArray, self.array_values) + if axis == 1: + vals = vals.T + new_values = vals._pad_or_backfill( + method=method, + limit=limit, + limit_area=limit_area, + copy=copy, + ) + if ( + not copy + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + if axis == 1: + new_values = new_values.T + + data = extract_array(new_values, extract_numpy=True) + + nb = self.make_block_same_class(data, refs=refs) + return nb._maybe_downcast([nb], downcast, using_cow, caller="fillna") + + @final + def interpolate( + self, + *, + method: InterpolateOptions, + index: Index, + inplace: bool = False, + limit: int | None = None, + limit_direction: Literal["forward", "backward", "both"] = "forward", + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None = None, + using_cow: bool = False, + already_warned=None, + **kwargs, + ) -> list[Block]: + inplace = validate_bool_kwarg(inplace, "inplace") + # error: Non-overlapping equality check [...] + if method == "asfreq": # type: ignore[comparison-overlap] + # clean_fill_method used to allow this + missing.clean_fill_method(method) + + if not self._can_hold_na: + # If there are no NAs, then interpolate is a no-op + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + # TODO(3.0): this case will not be reachable once GH#53638 is enforced + if self.dtype == _dtype_obj: + # only deal with floats + # bc we already checked that can_hold_na, we don't have int dtype here + # test_interp_basic checks that we make a copy here + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + # Dispatch to the EA method. + new_values = self.array_values.interpolate( + method=method, + axis=self.ndim - 1, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + copy=copy, + **kwargs, + ) + data = extract_array(new_values, extract_numpy=True) + + if ( + not copy + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + nb = self.make_block_same_class(data, refs=refs) + return nb._maybe_downcast([nb], downcast, using_cow, caller="interpolate") + + @final + def diff(self, n: int) -> list[Block]: + """return block for the diff of the values""" + # only reached with ndim == 2 + # TODO(EA2D): transpose will be unnecessary with 2D EAs + new_values = algos.diff(self.values.T, n, axis=0).T + return [self.make_block(values=new_values)] + + def shift(self, periods: int, fill_value: Any = None) -> list[Block]: + """shift the block by periods, possibly upcast""" + # convert integer to float if necessary. need to do a lot more than + # that, handle boolean etc also + axis = self.ndim - 1 + + # Note: periods is never 0 here, as that is handled at the top of + # NDFrame.shift. If that ever changes, we can do a check for periods=0 + # and possibly avoid coercing. 
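+ # Illustrative promotion (hypothetical values): an int64 block shifted
+ # with the default missing fill_value ends up with NaN, which int64
+ # cannot hold, so np_can_hold_element below raises LossySetitemError and
+ # we coerce to float64 first -- the same upcast users see from
+ # pd.Series([1, 2, 3]).shift(1) -> [NaN, 1.0, 2.0].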
+ + if not lib.is_scalar(fill_value) and self.dtype != _dtype_obj: + # with object dtype there is nothing to promote, and the user can + # pass pretty much any weird fill_value they like + # see test_shift_object_non_scalar_fill + raise ValueError("fill_value must be a scalar") + + fill_value = self._standardize_fill_value(fill_value) + + try: + # error: Argument 1 to "np_can_hold_element" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" + casted = np_can_hold_element( + self.dtype, fill_value # type: ignore[arg-type] + ) + except LossySetitemError: + nb = self.coerce_to_target_dtype(fill_value) + return nb.shift(periods, fill_value=fill_value) + + else: + values = cast(np.ndarray, self.values) + new_values = shift(values, periods, axis, casted) + return [self.make_block_same_class(new_values)] + + @final + def quantile( + self, + qs: Index, # with dtype float64 + interpolation: QuantileInterpolation = "linear", + ) -> Block: + """ + compute the quantiles of the + + Parameters + ---------- + qs : Index + The quantiles to be computed in float64. + interpolation : str, default 'linear' + Type of interpolation. + + Returns + ------- + Block + """ + # We should always have ndim == 2 because Series dispatches to DataFrame + assert self.ndim == 2 + assert is_list_like(qs) # caller is responsible for this + + result = quantile_compat(self.values, np.asarray(qs._values), interpolation) + # ensure_block_shape needed for cases where we start with EA and result + # is ndarray, e.g. IntegerArray, SparseArray + result = ensure_block_shape(result, ndim=2) + return new_block_2d(result, placement=self._mgr_locs) + + @final + def round(self, decimals: int, using_cow: bool = False) -> Self: + """ + Rounds the values. + If the block is not of an integer or float dtype, nothing happens. + This is consistent with DataFrame.round behavivor. + (Note: Series.round would raise) + + Parameters + ---------- + decimals: int, + Number of decimal places to round to. + Caller is responsible for validating this + using_cow: bool, + Whether Copy on Write is enabled right now + """ + if not self.is_numeric or self.is_bool: + return self.copy(deep=not using_cow) + refs = None + # TODO: round only defined on BaseMaskedArray + # Series also does this, so would need to fix both places + # error: Item "ExtensionArray" of "Union[ndarray[Any, Any], ExtensionArray]" + # has no attribute "round" + values = self.values.round(decimals) # type: ignore[union-attr] + if values is self.values: + if not using_cow: + # Normally would need to do this before, but + # numpy only returns same array when round operation + # is no-op + # https://github.com/numpy/numpy/blob/486878b37fc7439a3b2b87747f50db9b62fea8eb/numpy/core/src/multiarray/calculation.c#L625-L636 + values = values.copy() + else: + refs = self.refs + return self.make_block_same_class(values, refs=refs) + + # --------------------------------------------------------------------- + # Abstract Methods Overridden By EABackedBlock and NumpyBlock + + def delete(self, loc) -> list[Block]: + """Deletes the locs from the block. + + We split the block to avoid copying the underlying data. We create new + blocks for every connected segment of the initial block that is not deleted. + The new blocks point to the initial array. 
+ """ + if not is_list_like(loc): + loc = [loc] + + if self.ndim == 1: + values = cast(np.ndarray, self.values) + values = np.delete(values, loc) + mgr_locs = self._mgr_locs.delete(loc) + return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] + + if np.max(loc) >= self.values.shape[0]: + raise IndexError + + # Add one out-of-bounds indexer as maximum to collect + # all columns after our last indexer if any + loc = np.concatenate([loc, [self.values.shape[0]]]) + mgr_locs_arr = self._mgr_locs.as_array + new_blocks: list[Block] = [] + + previous_loc = -1 + # TODO(CoW): This is tricky, if parent block goes out of scope + # all split blocks are referencing each other even though they + # don't share data + refs = self.refs if self.refs.has_reference() else None + for idx in loc: + if idx == previous_loc + 1: + # There is no column between current and last idx + pass + else: + # No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[slice, slice]" + values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] + locs = mgr_locs_arr[previous_loc + 1 : idx] + nb = type(self)( + values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs + ) + new_blocks.append(nb) + + previous_loc = idx + + return new_blocks + + @property + def is_view(self) -> bool: + """return a boolean if I am possibly a view""" + raise AbstractMethodError(self) + + @property + def array_values(self) -> ExtensionArray: + """ + The array that Series.array returns. Always an ExtensionArray. + """ + raise AbstractMethodError(self) + + def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: + """ + return an internal format, currently just the ndarray + this is often overridden to handle to_dense like operations + """ + raise AbstractMethodError(self) + + +class EABackedBlock(Block): + """ + Mixin for Block subclasses backed by ExtensionArray. + """ + + values: ExtensionArray + + @final + def shift(self, periods: int, fill_value: Any = None) -> list[Block]: + """ + Shift the block by `periods`. + + Dispatches to underlying ExtensionArray and re-boxes in an + ExtensionBlock. + """ + # Transpose since EA.shift is always along axis=0, while we want to shift + # along rows. + new_values = self.values.T.shift(periods=periods, fill_value=fill_value).T + return [self.make_block_same_class(new_values)] + + @final + def setitem(self, indexer, value, using_cow: bool = False): + """ + Attempt self.values[indexer] = value, possibly creating a new array. + + This differs from Block.setitem by not allowing setitem to change + the dtype of the Block. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice, int + The subset of self.values to set + value : object + The value being set + using_cow: bool, default False + Signaling if CoW is used. + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. 
+ """ + orig_indexer = indexer + orig_value = value + + indexer = self._unwrap_setitem_indexer(indexer) + value = self._maybe_squeeze_arg(value) + + values = self.values + if values.ndim == 2: + # TODO(GH#45419): string[pyarrow] tests break if we transpose + # unconditionally + values = values.T + check_setitem_lengths(indexer, value, values) + + try: + values[indexer] = value + except (ValueError, TypeError): + if isinstance(self.dtype, IntervalDtype): + # see TestSetitemFloatIntervalWithIntIntervalValues + nb = self.coerce_to_target_dtype(orig_value, warn_on_upcast=True) + return nb.setitem(orig_indexer, orig_value) + + elif isinstance(self, NDArrayBackedExtensionBlock): + nb = self.coerce_to_target_dtype(orig_value, warn_on_upcast=True) + return nb.setitem(orig_indexer, orig_value) + + else: + raise + + else: + return self + + @final + def where( + self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False + ) -> list[Block]: + # _downcast private bc we only specify it when calling from fillna + arr = self.values.T + + cond = extract_bool_array(cond) + + orig_other = other + orig_cond = cond + other = self._maybe_squeeze_arg(other) + cond = self._maybe_squeeze_arg(cond) + + if other is lib.no_default: + other = self.fill_value + + icond, noop = validate_putmask(arr, ~cond) + if noop: + # GH#44181, GH#45135 + # Avoid a) raising for Interval/PeriodDtype and b) unnecessary object upcast + if using_cow: + return [self.copy(deep=False)] + return [self.copy()] + + try: + res_values = arr._where(cond, other).T + except (ValueError, TypeError): + if self.ndim == 1 or self.shape[0] == 1: + if isinstance(self.dtype, IntervalDtype): + # TestSetitemFloatIntervalWithIntIntervalValues + blk = self.coerce_to_target_dtype(orig_other) + nbs = blk.where(orig_other, orig_cond, using_cow=using_cow) + return self._maybe_downcast( + nbs, downcast=_downcast, using_cow=using_cow, caller="where" + ) + + elif isinstance(self, NDArrayBackedExtensionBlock): + # NB: not (yet) the same as + # isinstance(values, NDArrayBackedExtensionArray) + blk = self.coerce_to_target_dtype(orig_other) + nbs = blk.where(orig_other, orig_cond, using_cow=using_cow) + return self._maybe_downcast( + nbs, downcast=_downcast, using_cow=using_cow, caller="where" + ) + + else: + raise + + else: + # Same pattern we use in Block.putmask + is_array = isinstance(orig_other, (np.ndarray, ExtensionArray)) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + n = orig_other + if is_array: + # we have a different value per-column + n = orig_other[:, i : i + 1] + + submask = orig_cond[:, i : i + 1] + rbs = nb.where(n, submask, using_cow=using_cow) + res_blocks.extend(rbs) + return res_blocks + + nb = self.make_block_same_class(res_values) + return [nb] + + @final + def putmask( + self, mask, new, using_cow: bool = False, already_warned=None + ) -> list[Block]: + """ + See Block.putmask.__doc__ + """ + mask = extract_bool_array(mask) + if new is lib.no_default: + new = self.fill_value + + orig_new = new + orig_mask = mask + new = self._maybe_squeeze_arg(new) + mask = self._maybe_squeeze_arg(mask) + + if not mask.any(): + if using_cow: + return [self.copy(deep=False)] + return [self] + + if ( + warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + self = self._maybe_copy(using_cow, inplace=True) + 
values = self.values + if values.ndim == 2: + values = values.T + + try: + # Caller is responsible for ensuring matching lengths + values._putmask(mask, new) + except (TypeError, ValueError): + if self.ndim == 1 or self.shape[0] == 1: + if isinstance(self.dtype, IntervalDtype): + # Discussion about what we want to support in the general + # case GH#39584 + blk = self.coerce_to_target_dtype(orig_new, warn_on_upcast=True) + return blk.putmask(orig_mask, orig_new) + + elif isinstance(self, NDArrayBackedExtensionBlock): + # NB: not (yet) the same as + # isinstance(values, NDArrayBackedExtensionArray) + blk = self.coerce_to_target_dtype(orig_new, warn_on_upcast=True) + return blk.putmask(orig_mask, orig_new) + + else: + raise + + else: + # Same pattern we use in Block.putmask + is_array = isinstance(orig_new, (np.ndarray, ExtensionArray)) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + n = orig_new + if is_array: + # we have a different value per-column + n = orig_new[:, i : i + 1] + + submask = orig_mask[:, i : i + 1] + rbs = nb.putmask(submask, n) + res_blocks.extend(rbs) + return res_blocks + + return [self] + + @final + def delete(self, loc) -> list[Block]: + # This will be unnecessary if/when __array_function__ is implemented + if self.ndim == 1: + values = self.values.delete(loc) + mgr_locs = self._mgr_locs.delete(loc) + return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] + elif self.values.ndim == 1: + # We get here through to_stata + return [] + return super().delete(loc) + + @final + @cache_readonly + def array_values(self) -> ExtensionArray: + return self.values + + @final + def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: + """ + return object dtype as boxed values, such as Timestamps/Timedelta + """ + values: ArrayLike = self.values + if dtype == _dtype_obj: + values = values.astype(object) + # TODO(EA2D): reshape not needed with 2D EAs + return np.asarray(values).reshape(self.shape) + + @final + def pad_or_backfill( + self, + *, + method: FillnaOptions, + axis: AxisInt = 0, + inplace: bool = False, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None = None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + values = self.values + + kwargs: dict[str, Any] = {"method": method, "limit": limit} + if "limit_area" in inspect.signature(values._pad_or_backfill).parameters: + kwargs["limit_area"] = limit_area + elif limit_area is not None: + raise NotImplementedError( + f"{type(values).__name__} does not implement limit_area " + "(added in pandas 2.2). 3rd-party ExtnsionArray authors " + "need to add this argument to _pad_or_backfill." + ) + + if values.ndim == 2 and axis == 1: + # NDArrayBackedExtensionArray.fillna assumes axis=0 + new_values = values.T._pad_or_backfill(**kwargs).T + else: + new_values = values._pad_or_backfill(**kwargs) + return [self.make_block_same_class(new_values)] + + +class ExtensionBlock(EABackedBlock): + """ + Block for holding extension types. + + Notes + ----- + This holds all 3rd-party extension array types. It's also the immediate + parent class for our internal extension types' blocks. + + ExtensionArrays are limited to 1-D. 
+ """ + + values: ExtensionArray + + def fillna( + self, + value, + limit: int | None = None, + inplace: bool = False, + downcast=None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + if isinstance(self.dtype, IntervalDtype): + # Block.fillna handles coercion (test_fillna_interval) + return super().fillna( + value=value, + limit=limit, + inplace=inplace, + downcast=downcast, + using_cow=using_cow, + already_warned=already_warned, + ) + if using_cow and self._can_hold_na and not self.values._hasna: + refs = self.refs + new_values = self.values + else: + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + try: + new_values = self.values.fillna( + value=value, method=None, limit=limit, copy=copy + ) + except TypeError: + # 3rd party EA that has not implemented copy keyword yet + refs = None + new_values = self.values.fillna(value=value, method=None, limit=limit) + # issue the warning *after* retrying, in case the TypeError + # was caused by an invalid fill_value + warnings.warn( + # GH#53278 + "ExtensionArray.fillna added a 'copy' keyword in pandas " + "2.1.0. In a future version, ExtensionArray subclasses will " + "need to implement this keyword or an exception will be " + "raised. In the interim, the keyword is ignored by " + f"{type(self.values).__name__}.", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + else: + if ( + not copy + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + nb = self.make_block_same_class(new_values, refs=refs) + return nb._maybe_downcast([nb], downcast, using_cow=using_cow, caller="fillna") + + @cache_readonly + def shape(self) -> Shape: + # TODO(EA2D): override unnecessary with 2D EAs + if self.ndim == 1: + return (len(self.values),) + return len(self._mgr_locs), len(self.values) + + def iget(self, i: int | tuple[int, int] | tuple[slice, int]): + # In the case where we have a tuple[slice, int], the slice will always + # be slice(None) + # We _could_ make the annotation more specific, but mypy would + # complain about override mismatch: + # Literal[0] | tuple[Literal[0], int] | tuple[slice, int] + + # Note: only reached with self.ndim == 2 + + if isinstance(i, tuple): + # TODO(EA2D): unnecessary with 2D EAs + col, loc = i + if not com.is_null_slice(col) and col != 0: + raise IndexError(f"{self} only contains one item") + if isinstance(col, slice): + # the is_null_slice check above assures that col is slice(None) + # so what we want is a view on all our columns and row loc + if loc < 0: + loc += len(self.values) + # Note: loc:loc+1 vs [[loc]] makes a difference when called + # from fast_xs because we want to get a view back. + return self.values[loc : loc + 1] + return self.values[loc] + else: + if i != 0: + raise IndexError(f"{self} only contains one item") + return self.values + + def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: + # When an ndarray, we should have locs.tolist() == [0] + # When a BlockPlacement we should have list(locs) == [0] + if copy: + self.values = self.values.copy() + self.values[:] = values + + def _maybe_squeeze_arg(self, arg): + """ + If necessary, squeeze a (N, 1) ndarray to (N,) + """ + # e.g. 
if we are passed a 2D mask for putmask + if ( + isinstance(arg, (np.ndarray, ExtensionArray)) + and arg.ndim == self.values.ndim + 1 + ): + # TODO(EA2D): unnecessary with 2D EAs + assert arg.shape[1] == 1 + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[slice, int]" + arg = arg[:, 0] # type: ignore[call-overload] + elif isinstance(arg, ABCDataFrame): + # 2022-01-06 only reached for setitem + # TODO: should we avoid getting here with DataFrame? + assert arg.shape[1] == 1 + arg = arg._ixs(0, axis=1)._values + + return arg + + def _unwrap_setitem_indexer(self, indexer): + """ + Adapt a 2D-indexer to our 1D values. + + This is intended for 'setitem', not 'iget' or '_slice'. + """ + # TODO: ATM this doesn't work for iget/_slice, can we change that? + + if isinstance(indexer, tuple) and len(indexer) == 2: + # TODO(EA2D): not needed with 2D EAs + # Should never have length > 2. Caller is responsible for checking. + # Length 1 is reached vis setitem_single_block and setitem_single_column + # each of which pass indexer=(pi,) + if all(isinstance(x, np.ndarray) and x.ndim == 2 for x in indexer): + # GH#44703 went through indexing.maybe_convert_ix + first, second = indexer + if not ( + second.size == 1 and (second == 0).all() and first.shape[1] == 1 + ): + raise NotImplementedError( + "This should not be reached. Please report a bug at " + "github.com/pandas-dev/pandas/" + ) + indexer = first[:, 0] + + elif lib.is_integer(indexer[1]) and indexer[1] == 0: + # reached via setitem_single_block passing the whole indexer + indexer = indexer[0] + + elif com.is_null_slice(indexer[1]): + indexer = indexer[0] + + elif is_list_like(indexer[1]) and indexer[1][0] == 0: + indexer = indexer[0] + + else: + raise NotImplementedError( + "This should not be reached. Please report a bug at " + "github.com/pandas-dev/pandas/" + ) + return indexer + + @property + def is_view(self) -> bool: + """Extension arrays are never treated as views.""" + return False + + # error: Cannot override writeable attribute with read-only property + @cache_readonly + def is_numeric(self) -> bool: # type: ignore[override] + return self.values.dtype._is_numeric + + def _slice( + self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] + ) -> ExtensionArray: + """ + Return a slice of my values. + + Parameters + ---------- + slicer : slice, ndarray[int], or ndarray[bool] + Valid (non-reducing) indexer for self.values. + + Returns + ------- + ExtensionArray + """ + # Notes: ndarray[bool] is only reachable when via get_rows_with_mask, which + # is only for Series, i.e. self.ndim == 1. + + # return same dims as we currently have + if self.ndim == 2: + # reached via getitem_block via _slice_take_blocks_ax0 + # TODO(EA2D): won't be necessary with 2D EAs + + if not isinstance(slicer, slice): + raise AssertionError( + "invalid slicing for a 1-ndim ExtensionArray", slicer + ) + # GH#32959 only full-slicers along fake-dim0 are valid + # TODO(EA2D): won't be necessary with 2D EAs + # range(1) instead of self._mgr_locs to avoid exception on [::-1] + # see test_iloc_getitem_slice_negative_step_ea_block + new_locs = range(1)[slicer] + if not len(new_locs): + raise AssertionError( + "invalid slicing for a 1-ndim ExtensionArray", slicer + ) + slicer = slice(None) + + return self.values[slicer] + + @final + def slice_block_rows(self, slicer: slice) -> Self: + """ + Perform __getitem__-like specialized to slicing along index. 
+ """ + # GH#42787 in principle this is equivalent to values[..., slicer], but we don't + # require subclasses of ExtensionArray to support that form (for now). + new_values = self.values[slicer] + return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs) + + def _unstack( + self, + unstacker, + fill_value, + new_placement: npt.NDArray[np.intp], + needs_masking: npt.NDArray[np.bool_], + ): + # ExtensionArray-safe unstack. + # We override Block._unstack, which unstacks directly on the + # values of the array. For EA-backed blocks, this would require + # converting to a 2-D ndarray of objects. + # Instead, we unstack an ndarray of integer positions, followed by + # a `take` on the actual values. + + # Caller is responsible for ensuring self.shape[-1] == len(unstacker.index) + new_values, mask = unstacker.arange_result + + # Note: these next two lines ensure that + # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) + # which the calling function needs in order to pass verify_integrity=False + # to the BlockManager constructor + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + # needs_masking[i] calculated once in BlockManager.unstack tells + # us if there are any -1s in the relevant indices. When False, + # that allows us to go through a faster path in 'take', among + # other things avoiding e.g. Categorical._validate_scalar. + blocks = [ + # TODO: could cast to object depending on fill_value? + type(self)( + self.values.take( + indices, allow_fill=needs_masking[i], fill_value=fill_value + ), + BlockPlacement(place), + ndim=2, + ) + for i, (indices, place) in enumerate(zip(new_values, new_placement)) + ] + return blocks, mask + + +class NumpyBlock(Block): + values: np.ndarray + __slots__ = () + + @property + def is_view(self) -> bool: + """return a boolean if I am possibly a view""" + return self.values.base is not None + + @property + def array_values(self) -> ExtensionArray: + return NumpyExtensionArray(self.values) + + def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: + if dtype == _dtype_obj: + return self.values.astype(_dtype_obj) + return self.values + + @cache_readonly + def is_numeric(self) -> bool: # type: ignore[override] + dtype = self.values.dtype + kind = dtype.kind + + return kind in "fciub" + + +class NumericBlock(NumpyBlock): + # this Block type is kept for backwards-compatibility + # TODO(3.0): delete and remove deprecation in __init__.py. + __slots__ = () + + +class ObjectBlock(NumpyBlock): + # this Block type is kept for backwards-compatibility + # TODO(3.0): delete and remove deprecation in __init__.py. + __slots__ = () + + +class NDArrayBackedExtensionBlock(EABackedBlock): + """ + Block backed by an NDArrayBackedExtensionArray + """ + + values: NDArrayBackedExtensionArray + + @property + def is_view(self) -> bool: + """return a boolean if I am possibly a view""" + # check the ndarray values of the DatetimeIndex values + return self.values._ndarray.base is not None + + +class DatetimeLikeBlock(NDArrayBackedExtensionBlock): + """Block for datetime64[ns], timedelta64[ns].""" + + __slots__ = () + is_numeric = False + values: DatetimeArray | TimedeltaArray + + +class DatetimeTZBlock(DatetimeLikeBlock): + """implement a datetime64 block with a tz attribute""" + + values: DatetimeArray + + __slots__ = () + + +# ----------------------------------------------------------------- +# Constructor Helpers + + +def maybe_coerce_values(values: ArrayLike) -> ArrayLike: + """ + Input validation for values passed to __init__. 
Ensure that + any datetime64/timedelta64 dtypes are in nanoseconds. Ensure + that we do not have string dtypes. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + + Returns + ------- + values : np.ndarray or ExtensionArray + """ + # Caller is responsible for ensuring NumpyExtensionArray is already extracted. + + if isinstance(values, np.ndarray): + values = ensure_wrapped_if_datetimelike(values) + + if issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None: + # freq is only stored in DatetimeIndex/TimedeltaIndex, not in Series/DataFrame + values = values._with_freq(None) + + return values + + +def get_block_type(dtype: DtypeObj) -> type[Block]: + """ + Find the appropriate Block subclass to use for the given values and dtype. + + Parameters + ---------- + dtype : numpy or pandas dtype + + Returns + ------- + cls : class, subclass of Block + """ + if isinstance(dtype, DatetimeTZDtype): + return DatetimeTZBlock + elif isinstance(dtype, PeriodDtype): + return NDArrayBackedExtensionBlock + elif isinstance(dtype, ExtensionDtype): + # Note: need to be sure NumpyExtensionArray is unwrapped before we get here + return ExtensionBlock + + # We use kind checks because it is much more performant + # than is_foo_dtype + kind = dtype.kind + if kind in "Mm": + return DatetimeLikeBlock + + return NumpyBlock + + +def new_block_2d( + values: ArrayLike, placement: BlockPlacement, refs: BlockValuesRefs | None = None +): + # new_block specialized to case with + # ndim=2 + # isinstance(placement, BlockPlacement) + # check_ndim/ensure_block_shape already checked + klass = get_block_type(values.dtype) + + values = maybe_coerce_values(values) + return klass(values, ndim=2, placement=placement, refs=refs) + + +def new_block( + values, + placement: BlockPlacement, + *, + ndim: int, + refs: BlockValuesRefs | None = None, +) -> Block: + # caller is responsible for ensuring: + # - values is NOT a NumpyExtensionArray + # - check_ndim/ensure_block_shape already checked + # - maybe_coerce_values already called/unnecessary + klass = get_block_type(values.dtype) + return klass(values, ndim=ndim, placement=placement, refs=refs) + + +def check_ndim(values, placement: BlockPlacement, ndim: int) -> None: + """ + ndim inference and validation. + + Validates that values.ndim and ndim are consistent. + Validates that len(values) and len(placement) are consistent. + + Parameters + ---------- + values : array-like + placement : BlockPlacement + ndim : int + + Raises + ------ + ValueError : the number of dimensions do not match + """ + + if values.ndim > ndim: + # Check for both np.ndarray and ExtensionArray + raise ValueError( + "Wrong number of dimensions. " + f"values.ndim > ndim [{values.ndim} > {ndim}]" + ) + + if not is_1d_only_ea_dtype(values.dtype): + # TODO(EA2D): special case not needed with 2D EAs + if values.ndim != ndim: + raise ValueError( + "Wrong number of dimensions. 
" + f"values.ndim != ndim [{values.ndim} != {ndim}]" + ) + if len(placement) != len(values): + raise ValueError( + f"Wrong number of items passed {len(values)}, " + f"placement implies {len(placement)}" + ) + elif ndim == 2 and len(placement) != 1: + # TODO(EA2D): special case unnecessary with 2D EAs + raise ValueError("need to split") + + +def extract_pandas_array( + values: ArrayLike, dtype: DtypeObj | None, ndim: int +) -> tuple[ArrayLike, DtypeObj | None]: + """ + Ensure that we don't allow NumpyExtensionArray / NumpyEADtype in internals. + """ + # For now, blocks should be backed by ndarrays when possible. + if isinstance(values, ABCNumpyExtensionArray): + values = values.to_numpy() + if ndim and ndim > 1: + # TODO(EA2D): special case not needed with 2D EAs + values = np.atleast_2d(values) + + if isinstance(dtype, NumpyEADtype): + dtype = dtype.numpy_dtype + + return values, dtype + + +# ----------------------------------------------------------------- + + +def extend_blocks(result, blocks=None) -> list[Block]: + """return a new extended blocks, given the result""" + if blocks is None: + blocks = [] + if isinstance(result, list): + for r in result: + if isinstance(r, list): + blocks.extend(r) + else: + blocks.append(r) + else: + assert isinstance(result, Block), type(result) + blocks.append(result) + return blocks + + +def ensure_block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: + """ + Reshape if possible to have values.ndim == ndim. + """ + + if values.ndim < ndim: + if not is_1d_only_ea_dtype(values.dtype): + # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023 + # block.shape is incorrect for "2D" ExtensionArrays + # We can't, and don't need to, reshape. + values = cast("np.ndarray | DatetimeArray | TimedeltaArray", values) + values = values.reshape(1, -1) + + return values + + +def external_values(values: ArrayLike) -> ArrayLike: + """ + The array that Series.values returns (public attribute). + + This has some historical constraints, and is overridden in block + subclasses to return the correct array (e.g. period returns + object ndarray and datetimetz a datetime64[ns] ndarray instead of + proper extension array). + """ + if isinstance(values, (PeriodArray, IntervalArray)): + return values.astype(object) + elif isinstance(values, (DatetimeArray, TimedeltaArray)): + # NB: for datetime64tz this is different from np.asarray(values), since + # that returns an object-dtype ndarray of Timestamps. 
+ # Avoid raising in .astype in casting from dt64tz to dt64 + values = values._ndarray + + if isinstance(values, np.ndarray) and using_copy_on_write(): + values = values.view() + values.flags.writeable = False + + # TODO(CoW) we should also mark our ExtensionArrays as read-only + + return values diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/concat.py b/venv/lib/python3.10/site-packages/pandas/core/internals/concat.py new file mode 100644 index 0000000000000000000000000000000000000000..b2d463a8c6c26f62ded5a06283f29275612c9b40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/internals/concat.py @@ -0,0 +1,598 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import ( + NaT, + algos as libalgos, + internals as libinternals, + lib, +) +from pandas._libs.missing import NA +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + ensure_dtype_can_hold_na, + find_common_type, +) +from pandas.core.dtypes.common import ( + is_1d_only_ea_dtype, + is_scalar, + needs_i8_conversion, +) +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + SparseDtype, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + isna_all, +) + +from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.internals.array_manager import ArrayManager +from pandas.core.internals.blocks import ( + ensure_block_shape, + new_block_2d, +) +from pandas.core.internals.managers import ( + BlockManager, + make_na_array, +) + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + Manager2D, + Shape, + ) + + from pandas import Index + from pandas.core.internals.blocks import ( + Block, + BlockPlacement, + ) + + +def _concatenate_array_managers( + mgrs: list[ArrayManager], axes: list[Index], concat_axis: AxisInt +) -> Manager2D: + """ + Concatenate array managers into one. + + Parameters + ---------- + mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + + Returns + ------- + ArrayManager + """ + if concat_axis == 1: + return mgrs[0].concat_vertical(mgrs, axes) + else: + # concatting along the columns -> combine reindexed arrays in a single manager + assert concat_axis == 0 + return mgrs[0].concat_horizontal(mgrs, axes) + + +def concatenate_managers( + mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool +) -> Manager2D: + """ + Concatenate block managers into one. 
+ + Parameters + ---------- + mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + copy : bool + + Returns + ------- + BlockManager + """ + + needs_copy = copy and concat_axis == 0 + + # TODO(ArrayManager) this assumes that all managers are of the same type + if isinstance(mgrs_indexers[0][0], ArrayManager): + mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) + # error: Argument 1 to "_concatenate_array_managers" has incompatible + # type "List[BlockManager]"; expected "List[Union[ArrayManager, + # SingleArrayManager, BlockManager, SingleBlockManager]]" + return _concatenate_array_managers( + mgrs, axes, concat_axis # type: ignore[arg-type] + ) + + # Assertions disabled for performance + # for tup in mgrs_indexers: + # # caller is responsible for ensuring this + # indexers = tup[1] + # assert concat_axis not in indexers + + if concat_axis == 0: + mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) + return mgrs[0].concat_horizontal(mgrs, axes) + + if len(mgrs_indexers) > 0 and mgrs_indexers[0][0].nblocks > 0: + first_dtype = mgrs_indexers[0][0].blocks[0].dtype + if first_dtype in [np.float64, np.float32]: + # TODO: support more dtypes here. This will be simpler once + # JoinUnit.is_na behavior is deprecated. + if ( + all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers) + and len(mgrs_indexers) > 1 + ): + # Fastpath! + # Length restriction is just to avoid having to worry about 'copy' + shape = tuple(len(x) for x in axes) + nb = _concat_homogeneous_fastpath(mgrs_indexers, shape, first_dtype) + return BlockManager((nb,), axes) + + mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) + + if len(mgrs) == 1: + mgr = mgrs[0] + out = mgr.copy(deep=False) + out.axes = axes + return out + + concat_plan = _get_combined_plan(mgrs) + + blocks = [] + values: ArrayLike + + for placement, join_units in concat_plan: + unit = join_units[0] + blk = unit.block + + if _is_uniform_join_units(join_units): + vals = [ju.block.values for ju in join_units] + + if not blk.is_extension: + # _is_uniform_join_units ensures a single dtype, so + # we can use np.concatenate, which is more performant + # than concat_compat + # error: Argument 1 to "concatenate" has incompatible type + # "List[Union[ndarray[Any, Any], ExtensionArray]]"; + # expected "Union[_SupportsArray[dtype[Any]], + # _NestedSequence[_SupportsArray[dtype[Any]]]]" + values = np.concatenate(vals, axis=1) # type: ignore[arg-type] + elif is_1d_only_ea_dtype(blk.dtype): + # TODO(EA2D): special-casing not needed with 2D EAs + values = concat_compat(vals, axis=0, ea_compat_axis=True) + values = ensure_block_shape(values, ndim=2) + else: + values = concat_compat(vals, axis=1) + + values = ensure_wrapped_if_datetimelike(values) + + fastpath = blk.values.dtype == values.dtype + else: + values = _concatenate_join_units(join_units, copy=copy) + fastpath = False + + if fastpath: + b = blk.make_block_same_class(values, placement=placement) + else: + b = new_block_2d(values, placement=placement) + + blocks.append(b) + + return BlockManager(tuple(blocks), axes) + + +def _maybe_reindex_columns_na_proxy( + axes: list[Index], + mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]], + needs_copy: bool, +) -> list[BlockManager]: + """ + Reindex along columns so that all of the BlockManagers being concatenated + have matching columns. 
+ + Columns added in this reindexing have dtype=np.void, indicating they + should be ignored when choosing a column's final dtype. + """ + new_mgrs = [] + + for mgr, indexers in mgrs_indexers: + # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this + # is a cheap reindexing. + for i, indexer in indexers.items(): + mgr = mgr.reindex_indexer( + axes[i], + indexers[i], + axis=i, + copy=False, + only_slice=True, # only relevant for i==0 + allow_dups=True, + use_na_proxy=True, # only relevant for i==0 + ) + if needs_copy and not indexers: + mgr = mgr.copy() + + new_mgrs.append(mgr) + return new_mgrs + + +def _is_homogeneous_mgr(mgr: BlockManager, first_dtype: DtypeObj) -> bool: + """ + Check if this Manager can be treated as a single ndarray. + """ + if mgr.nblocks != 1: + return False + blk = mgr.blocks[0] + if not (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1): + return False + + return blk.dtype == first_dtype + + +def _concat_homogeneous_fastpath( + mgrs_indexers, shape: Shape, first_dtype: np.dtype +) -> Block: + """ + With single-Block managers with homogeneous dtypes (that can already hold nan), + we avoid [...] + """ + # assumes + # all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in in mgrs_indexers) + + if all(not indexers for _, indexers in mgrs_indexers): + # https://github.com/pandas-dev/pandas/pull/52685#issuecomment-1523287739 + arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers] + arr = np.concatenate(arrs).T + bp = libinternals.BlockPlacement(slice(shape[0])) + nb = new_block_2d(arr, bp) + return nb + + arr = np.empty(shape, dtype=first_dtype) + + if first_dtype == np.float64: + take_func = libalgos.take_2d_axis0_float64_float64 + else: + take_func = libalgos.take_2d_axis0_float32_float32 + + start = 0 + for mgr, indexers in mgrs_indexers: + mgr_len = mgr.shape[1] + end = start + mgr_len + + if 0 in indexers: + take_func( + mgr.blocks[0].values, + indexers[0], + arr[:, start:end], + ) + else: + # No reindexing necessary, we can copy values directly + arr[:, start:end] = mgr.blocks[0].values + + start += mgr_len + + bp = libinternals.BlockPlacement(slice(shape[0])) + nb = new_block_2d(arr, bp) + return nb + + +def _get_combined_plan( + mgrs: list[BlockManager], +) -> list[tuple[BlockPlacement, list[JoinUnit]]]: + plan = [] + + max_len = mgrs[0].shape[0] + + blknos_list = [mgr.blknos for mgr in mgrs] + pairs = libinternals.get_concat_blkno_indexers(blknos_list) + for ind, (blknos, bp) in enumerate(pairs): + # assert bp.is_slice_like + # assert len(bp) > 0 + + units_for_bp = [] + for k, mgr in enumerate(mgrs): + blkno = blknos[k] + + nb = _get_block_for_concat_plan(mgr, bp, blkno, max_len=max_len) + unit = JoinUnit(nb) + units_for_bp.append(unit) + + plan.append((bp, units_for_bp)) + + return plan + + +def _get_block_for_concat_plan( + mgr: BlockManager, bp: BlockPlacement, blkno: int, *, max_len: int +) -> Block: + blk = mgr.blocks[blkno] + # Assertions disabled for performance: + # assert bp.is_slice_like + # assert blkno != -1 + # assert (mgr.blknos[bp] == blkno).all() + + if len(bp) == len(blk.mgr_locs) and ( + blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1 + ): + nb = blk + else: + ax0_blk_indexer = mgr.blklocs[bp.indexer] + + slc = lib.maybe_indices_to_slice(ax0_blk_indexer, max_len) + # TODO: in all extant test cases 2023-04-08 we have a slice here. + # Will this always be the case? 
+ if isinstance(slc, slice): + nb = blk.slice_block_columns(slc) + else: + nb = blk.take_block_columns(slc) + + # assert nb.shape == (len(bp), mgr.shape[1]) + return nb + + +class JoinUnit: + def __init__(self, block: Block) -> None: + self.block = block + + def __repr__(self) -> str: + return f"{type(self).__name__}({repr(self.block)})" + + def _is_valid_na_for(self, dtype: DtypeObj) -> bool: + """ + Check that we are all-NA of a type/dtype that is compatible with this dtype. + Augments `self.is_na` with an additional check of the type of NA values. + """ + if not self.is_na: + return False + + blk = self.block + if blk.dtype.kind == "V": + return True + + if blk.dtype == object: + values = blk.values + return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K")) + + na_value = blk.fill_value + if na_value is NaT and blk.dtype != dtype: + # e.g. we are dt64 and other is td64 + # fill_values match but we should not cast blk.values to dtype + # TODO: this will need updating if we ever have non-nano dt64/td64 + return False + + if na_value is NA and needs_i8_conversion(dtype): + # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat + # e.g. blk.dtype == "Int64" and dtype is td64, we dont want + # to consider these as matching + return False + + # TODO: better to use can_hold_element? + return is_valid_na_for_dtype(na_value, dtype) + + @cache_readonly + def is_na(self) -> bool: + blk = self.block + if blk.dtype.kind == "V": + return True + + if not blk._can_hold_na: + return False + + values = blk.values + if values.size == 0: + # GH#39122 this case will return False once deprecation is enforced + return True + + if isinstance(values.dtype, SparseDtype): + return False + + if values.ndim == 1: + # TODO(EA2D): no need for special case with 2D EAs + val = values[0] + if not is_scalar(val) or not isna(val): + # ideally isna_all would do this short-circuiting + return False + return isna_all(values) + else: + val = values[0][0] + if not is_scalar(val) or not isna(val): + # ideally isna_all would do this short-circuiting + return False + return all(isna_all(row) for row in values) + + @cache_readonly + def is_na_after_size_and_isna_all_deprecation(self) -> bool: + """ + Will self.is_na be True after values.size == 0 deprecation and isna_all + deprecation are enforced? + """ + blk = self.block + if blk.dtype.kind == "V": + return True + return False + + def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: + values: ArrayLike + + if upcasted_na is None and self.block.dtype.kind != "V": + # No upcasting is necessary + return self.block.values + else: + fill_value = upcasted_na + + if self._is_valid_na_for(empty_dtype): + # note: always holds when self.block.dtype.kind == "V" + blk_dtype = self.block.dtype + + if blk_dtype == np.dtype("object"): + # we want to avoid filling with np.nan if we are + # using None; we already know that we are all + # nulls + values = cast(np.ndarray, self.block.values) + if values.size and values[0, 0] is None: + fill_value = None + + return make_na_array(empty_dtype, self.block.shape, fill_value) + + return self.block.values + + +def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike: + """ + Concatenate values from several join units along axis=1. 
+ """ + empty_dtype, empty_dtype_future = _get_empty_dtype(join_units) + + has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units) + upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks) + + to_concat = [ + ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na) + for ju in join_units + ] + + if any(is_1d_only_ea_dtype(t.dtype) for t in to_concat): + # TODO(EA2D): special case not needed if all EAs used HybridBlocks + + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[int, slice]" + to_concat = [ + t + if is_1d_only_ea_dtype(t.dtype) + else t[0, :] # type: ignore[call-overload] + for t in to_concat + ] + concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True) + concat_values = ensure_block_shape(concat_values, 2) + + else: + concat_values = concat_compat(to_concat, axis=1) + + if empty_dtype != empty_dtype_future: + if empty_dtype == concat_values.dtype: + # GH#39122, GH#40893 + warnings.warn( + "The behavior of DataFrame concatenation with empty or all-NA " + "entries is deprecated. In a future version, this will no longer " + "exclude empty or all-NA columns when determining the result dtypes. " + "To retain the old behavior, exclude the relevant entries before " + "the concat operation.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return concat_values + + +def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool): + """ + Find the NA value to go with this dtype. + """ + if isinstance(dtype, ExtensionDtype): + return dtype.na_value + elif dtype.kind in "mM": + return dtype.type("NaT") + elif dtype.kind in "fc": + return dtype.type("NaN") + elif dtype.kind == "b": + # different from missing.na_value_for_dtype + return None + elif dtype.kind in "iu": + if not has_none_blocks: + # different from missing.na_value_for_dtype + return None + return np.nan + elif dtype.kind == "O": + return np.nan + raise NotImplementedError + + +def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> tuple[DtypeObj, DtypeObj]: + """ + Return dtype and N/A values to use when concatenating specified units. + + Returned N/A value may be None which means there was no casting involved. + + Returns + ------- + dtype + """ + if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]): + empty_dtype = join_units[0].block.dtype + return empty_dtype, empty_dtype + + has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units) + + dtypes = [unit.block.dtype for unit in join_units if not unit.is_na] + if not len(dtypes): + dtypes = [ + unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V" + ] + + dtype = find_common_type(dtypes) + if has_none_blocks: + dtype = ensure_dtype_can_hold_na(dtype) + + dtype_future = dtype + if len(dtypes) != len(join_units): + dtypes_future = [ + unit.block.dtype + for unit in join_units + if not unit.is_na_after_size_and_isna_all_deprecation + ] + if not len(dtypes_future): + dtypes_future = [ + unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V" + ] + + if len(dtypes) != len(dtypes_future): + dtype_future = find_common_type(dtypes_future) + if has_none_blocks: + dtype_future = ensure_dtype_can_hold_na(dtype_future) + + return dtype, dtype_future + + +def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool: + """ + Check if the join units consist of blocks of uniform type that can + be concatenated using Block.concat_same_type instead of the generic + _concatenate_join_units (which uses `concat_compat`). 
+ + """ + first = join_units[0].block + if first.dtype.kind == "V": + return False + return ( + # exclude cases where a) ju.block is None or b) we have e.g. Int64+int64 + all(type(ju.block) is type(first) for ju in join_units) + and + # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform + all( + ju.block.dtype == first.dtype + # GH#42092 we only want the dtype_equal check for non-numeric blocks + # (for now, may change but that would need a deprecation) + or ju.block.dtype.kind in "iub" + for ju in join_units + ) + and + # no blocks that would get missing values (can lead to type upcasts) + # unless we're an extension dtype. + all(not ju.is_na or ju.block.is_extension for ju in join_units) + ) diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/construction.py b/venv/lib/python3.10/site-packages/pandas/core/internals/construction.py new file mode 100644 index 0000000000000000000000000000000000000000..609d2c9a7a285ec23569f9fa06067f0a5b0a00cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/internals/construction.py @@ -0,0 +1,1072 @@ +""" +Functions for preparing various inputs passed to the DataFrame or Series +constructors before passing them to a BlockManager. +""" +from __future__ import annotations + +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np +from numpy import ma + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib + +from pandas.core.dtypes.astype import astype_is_view +from pandas.core.dtypes.cast import ( + construct_1d_arraylike_from_scalar, + dict_compat, + maybe_cast_to_datetime, + maybe_convert_platform, + maybe_infer_to_datetimelike, +) +from pandas.core.dtypes.common import ( + is_1d_only_ea_dtype, + is_integer_dtype, + is_list_like, + is_named_tuple, + is_object_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core import ( + algorithms, + common as com, +) +from pandas.core.arrays import ExtensionArray +from pandas.core.arrays.string_ import StringDtype +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, + range_to_ndarray, + sanitize_array, +) +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + TimedeltaIndex, + default_index, + ensure_index, + get_objs_combined_axis, + union_indexes, +) +from pandas.core.internals.array_manager import ( + ArrayManager, + SingleArrayManager, +) +from pandas.core.internals.blocks import ( + BlockPlacement, + ensure_block_shape, + new_block, + new_block_2d, +) +from pandas.core.internals.managers import ( + BlockManager, + SingleBlockManager, + create_block_manager_from_blocks, + create_block_manager_from_column_arrays, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + DtypeObj, + Manager, + npt, + ) +# --------------------------------------------------------------------- +# BlockManager Interface + + +def arrays_to_mgr( + arrays, + columns: Index, + index, + *, + dtype: DtypeObj | None = None, + verify_integrity: bool = True, + typ: str | None = None, + consolidate: bool = True, +) -> Manager: + """ + Segregate Series based on type and coerce into matrices. + + Needs to handle a lot of exceptional cases. 
+ """ + if verify_integrity: + # figure out the index, if necessary + if index is None: + index = _extract_index(arrays) + else: + index = ensure_index(index) + + # don't force copy because getting jammed in an ndarray anyway + arrays, refs = _homogenize(arrays, index, dtype) + # _homogenize ensures + # - all(len(x) == len(index) for x in arrays) + # - all(x.ndim == 1 for x in arrays) + # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) + # - all(type(x) is not NumpyExtensionArray for x in arrays) + + else: + index = ensure_index(index) + arrays = [extract_array(x, extract_numpy=True) for x in arrays] + # with _from_arrays, the passed arrays should never be Series objects + refs = [None] * len(arrays) + + # Reached via DataFrame._from_arrays; we do minimal validation here + for arr in arrays: + if ( + not isinstance(arr, (np.ndarray, ExtensionArray)) + or arr.ndim != 1 + or len(arr) != len(index) + ): + raise ValueError( + "Arrays must be 1-dimensional np.ndarray or ExtensionArray " + "with length matching len(index)" + ) + + columns = ensure_index(columns) + if len(columns) != len(arrays): + raise ValueError("len(arrays) must match len(columns)") + + # from BlockManager perspective + axes = [columns, index] + + if typ == "block": + return create_block_manager_from_column_arrays( + arrays, axes, consolidate=consolidate, refs=refs + ) + elif typ == "array": + return ArrayManager(arrays, [index, columns]) + else: + raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") + + +def rec_array_to_mgr( + data: np.rec.recarray | np.ndarray, + index, + columns, + dtype: DtypeObj | None, + copy: bool, + typ: str, +) -> Manager: + """ + Extract from a masked rec array and create the manager. + """ + # essentially process a record array then fill it + fdata = ma.getdata(data) + if index is None: + index = default_index(len(fdata)) + else: + index = ensure_index(index) + + if columns is not None: + columns = ensure_index(columns) + arrays, arr_columns = to_arrays(fdata, columns) + + # create the manager + + arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index)) + if columns is None: + columns = arr_columns + + mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ) + + if copy: + mgr = mgr.copy() + return mgr + + +def mgr_to_mgr(mgr, typ: str, copy: bool = True) -> Manager: + """ + Convert to specific type of Manager. Does not copy if the type is already + correct. Does not guarantee a copy otherwise. `copy` keyword only controls + whether conversion from Block->ArrayManager copies the 1D arrays. 
+ """ + new_mgr: Manager + + if typ == "block": + if isinstance(mgr, BlockManager): + new_mgr = mgr + else: + if mgr.ndim == 2: + new_mgr = arrays_to_mgr( + mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block" + ) + else: + new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index) + elif typ == "array": + if isinstance(mgr, ArrayManager): + new_mgr = mgr + else: + if mgr.ndim == 2: + arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))] + if copy: + arrays = [arr.copy() for arr in arrays] + new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]]) + else: + array = mgr.internal_values() + if copy: + array = array.copy() + new_mgr = SingleArrayManager([array], [mgr.index]) + else: + raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") + return new_mgr + + +# --------------------------------------------------------------------- +# DataFrame Constructor Interface + + +def ndarray_to_mgr( + values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str +) -> Manager: + # used in DataFrame.__init__ + # input must be a ndarray, list, Series, Index, ExtensionArray + + if isinstance(values, ABCSeries): + if columns is None: + if values.name is not None: + columns = Index([values.name]) + if index is None: + index = values.index + else: + values = values.reindex(index) + + # zero len case (GH #2234) + if not len(values) and columns is not None and len(columns): + values = np.empty((0, 1), dtype=object) + + # if the array preparation does a copy -> avoid this for ArrayManager, + # since the copy is done on conversion to 1D arrays + copy_on_sanitize = False if typ == "array" else copy + + vdtype = getattr(values, "dtype", None) + refs = None + if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype): + # GH#19157 + + if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1: + # GH#12513 a EA dtype passed with a 2D array, split into + # multiple EAs that view the values + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[slice, int]" + values = [ + values[:, n] # type: ignore[call-overload] + for n in range(values.shape[1]) + ] + else: + values = [values] + + if columns is None: + columns = Index(range(len(values))) + else: + columns = ensure_index(columns) + + return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ) + + elif isinstance(vdtype, ExtensionDtype): + # i.e. 
Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype) + # are already caught above + values = extract_array(values, extract_numpy=True) + if copy: + values = values.copy() + if values.ndim == 1: + values = values.reshape(-1, 1) + + elif isinstance(values, (ABCSeries, Index)): + if not copy_on_sanitize and ( + dtype is None or astype_is_view(values.dtype, dtype) + ): + refs = values._references + + if copy_on_sanitize: + values = values._values.copy() + else: + values = values._values + + values = _ensure_2d(values) + + elif isinstance(values, (np.ndarray, ExtensionArray)): + # drop subclass info + _copy = ( + copy_on_sanitize + if (dtype is None or astype_is_view(values.dtype, dtype)) + else False + ) + values = np.array(values, copy=_copy) + values = _ensure_2d(values) + + else: + # by definition an array here + # the dtypes will be coerced to a single dtype + values = _prep_ndarraylike(values, copy=copy_on_sanitize) + + if dtype is not None and values.dtype != dtype: + # GH#40110 see similar check inside sanitize_array + values = sanitize_array( + values, + None, + dtype=dtype, + copy=copy_on_sanitize, + allow_2d=True, + ) + + # _prep_ndarraylike ensures that values.ndim == 2 at this point + index, columns = _get_axes( + values.shape[0], values.shape[1], index=index, columns=columns + ) + + _check_values_indices_shape_match(values, index, columns) + + if typ == "array": + if issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + if dtype is None and is_object_dtype(values.dtype): + arrays = [ + ensure_wrapped_if_datetimelike( + maybe_infer_to_datetimelike(values[:, i]) + ) + for i in range(values.shape[1]) + ] + else: + if lib.is_np_dtype(values.dtype, "mM"): + values = ensure_wrapped_if_datetimelike(values) + arrays = [values[:, i] for i in range(values.shape[1])] + + if copy: + arrays = [arr.copy() for arr in arrays] + + return ArrayManager(arrays, [index, columns], verify_integrity=False) + + values = values.T + + # if we don't have a dtype specified, then try to convert objects + # on the entire block; this is to convert if we have datetimelike's + # embedded in an object type + if dtype is None and is_object_dtype(values.dtype): + obj_columns = list(values) + maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns] + # don't convert (and copy) the objects if no type inference occurs + if any(x is not y for x, y in zip(obj_columns, maybe_datetime)): + dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime] + block_values = [ + new_block_2d(dvals_list[n], placement=BlockPlacement(n)) + for n in range(len(dvals_list)) + ] + else: + bp = BlockPlacement(slice(len(columns))) + nb = new_block_2d(values, placement=bp, refs=refs) + block_values = [nb] + elif dtype is None and values.dtype.kind == "U" and using_pyarrow_string_dtype(): + dtype = StringDtype(storage="pyarrow_numpy") + + obj_columns = list(values) + block_values = [ + new_block( + dtype.construct_array_type()._from_sequence(data, dtype=dtype), + BlockPlacement(slice(i, i + 1)), + ndim=2, + ) + for i, data in enumerate(obj_columns) + ] + + else: + bp = BlockPlacement(slice(len(columns))) + nb = new_block_2d(values, placement=bp, refs=refs) + block_values = [nb] + + if len(columns) == 0: + # TODO: check len(values) == 0? 
+ block_values = [] + + return create_block_manager_from_blocks( + block_values, [columns, index], verify_integrity=False + ) + + +def _check_values_indices_shape_match( + values: np.ndarray, index: Index, columns: Index +) -> None: + """ + Check that the shape implied by our axes matches the actual shape of the + data. + """ + if values.shape[1] != len(columns) or values.shape[0] != len(index): + # Could let this raise in Block constructor, but we get a more + # helpful exception message this way. + if values.shape[0] == 0 < len(index): + raise ValueError("Empty data passed with indices specified.") + + passed = values.shape + implied = (len(index), len(columns)) + raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}") + + +def dict_to_mgr( + data: dict, + index, + columns, + *, + dtype: DtypeObj | None = None, + typ: str = "block", + copy: bool = True, +) -> Manager: + """ + Segregate Series based on type and coerce into matrices. + Needs to handle a lot of exceptional cases. + + Used in DataFrame.__init__ + """ + arrays: Sequence[Any] | Series + + if columns is not None: + from pandas.core.series import Series + + arrays = Series(data, index=columns, dtype=object) + missing = arrays.isna() + if index is None: + # GH10856 + # raise ValueError if only scalars in dict + index = _extract_index(arrays[~missing]) + else: + index = ensure_index(index) + + # no obvious "empty" int column + if missing.any() and not is_integer_dtype(dtype): + nan_dtype: DtypeObj + + if dtype is not None: + # calling sanitize_array ensures we don't mix-and-match + # NA dtypes + midxs = missing.values.nonzero()[0] + for i in midxs: + arr = sanitize_array(arrays.iat[i], index, dtype=dtype) + arrays.iat[i] = arr + else: + # GH#1783 + nan_dtype = np.dtype("object") + val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) + nmissing = missing.sum() + if copy: + rhs = [val] * nmissing + else: + # GH#45369 + rhs = [val.copy() for _ in range(nmissing)] + arrays.loc[missing] = rhs + + arrays = list(arrays) + columns = ensure_index(columns) + + else: + keys = list(data.keys()) + columns = Index(keys) if keys else default_index(0) + arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] + + if copy: + if typ == "block": + # We only need to copy arrays that will not get consolidated, i.e. + # only EA arrays + arrays = [ + x.copy() + if isinstance(x, ExtensionArray) + else x.copy(deep=True) + if ( + isinstance(x, Index) + or isinstance(x, ABCSeries) + and is_1d_only_ea_dtype(x.dtype) + ) + else x + for x in arrays + ] + else: + # dtype check to exclude e.g. range objects, scalars + arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays] + + return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy) + + +def nested_data_to_arrays( + data: Sequence, + columns: Index | None, + index: Index | None, + dtype: DtypeObj | None, +) -> tuple[list[ArrayLike], Index, Index]: + """ + Convert a single sequence of arrays to multiple arrays. 
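# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition): nested_data_to_arrays() above is the
# row-wise constructor path; named tuples contribute their _fields as column
# names when none are passed explicitly.
from collections import namedtuple
import pandas as pd

Row = namedtuple("Row", ["x", "y"])
df = pd.DataFrame([Row(1, 2), Row(3, 4)])
print(df.columns.tolist())  # ['x', 'y']
# ---------------------------------------------------------------------------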
+ """ + # By the time we get here we have already checked treat_as_nested(data) + + if is_named_tuple(data[0]) and columns is None: + columns = ensure_index(data[0]._fields) + + arrays, columns = to_arrays(data, columns, dtype=dtype) + columns = ensure_index(columns) + + if index is None: + if isinstance(data[0], ABCSeries): + index = _get_names_from_index(data) + else: + index = default_index(len(data)) + + return arrays, columns, index + + +def treat_as_nested(data) -> bool: + """ + Check if we should use nested_data_to_arrays. + """ + return ( + len(data) > 0 + and is_list_like(data[0]) + and getattr(data[0], "ndim", 1) == 1 + and not (isinstance(data, ExtensionArray) and data.ndim == 2) + ) + + +# --------------------------------------------------------------------- + + +def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray: + # values is specifically _not_ ndarray, EA, Index, or Series + # We only get here with `not treat_as_nested(values)` + + if len(values) == 0: + # TODO: check for length-zero range, in which case return int64 dtype? + # TODO: reuse anything in try_cast? + return np.empty((0, 0), dtype=object) + elif isinstance(values, range): + arr = range_to_ndarray(values) + return arr[..., np.newaxis] + + def convert(v): + if not is_list_like(v) or isinstance(v, ABCDataFrame): + return v + + v = extract_array(v, extract_numpy=True) + res = maybe_convert_platform(v) + # We don't do maybe_infer_to_datetimelike here bc we will end up doing + # it column-by-column in ndarray_to_mgr + return res + + # we could have a 1-dim or 2-dim list here + # this is equiv of np.asarray, but does object conversion + # and platform dtype preservation + # does not convert e.g. [1, "a", True] to ["1", "a", "True"] like + # np.asarray would + if is_list_like(values[0]): + values = np.array([convert(v) for v in values]) + elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: + # GH#21861 see test_constructor_list_of_lists + values = np.array([convert(v) for v in values]) + else: + values = convert(values) + + return _ensure_2d(values) + + +def _ensure_2d(values: np.ndarray) -> np.ndarray: + """ + Reshape 1D values, raise on anything else other than 2D. + """ + if values.ndim == 1: + values = values.reshape((values.shape[0], 1)) + elif values.ndim != 2: + raise ValueError(f"Must pass 2-d input. shape={values.shape}") + return values + + +def _homogenize( + data, index: Index, dtype: DtypeObj | None +) -> tuple[list[ArrayLike], list[Any]]: + oindex = None + homogenized = [] + # if the original array-like in `data` is a Series, keep track of this Series' refs + refs: list[Any] = [] + + for val in data: + if isinstance(val, (ABCSeries, Index)): + if dtype is not None: + val = val.astype(dtype, copy=False) + if isinstance(val, ABCSeries) and val.index is not index: + # Forces alignment. 
No need to copy data since we + # are putting it into an ndarray later + val = val.reindex(index, copy=False) + refs.append(val._references) + val = val._values + else: + if isinstance(val, dict): + # GH#41785 this _should_ be equivalent to (but faster than) + # val = Series(val, index=index)._values + if oindex is None: + oindex = index.astype("O") + + if isinstance(index, (DatetimeIndex, TimedeltaIndex)): + # see test_constructor_dict_datetime64_index + val = dict_compat(val) + else: + # see test_constructor_subclass_dict + val = dict(val) + val = lib.fast_multiget(val, oindex._values, default=np.nan) + + val = sanitize_array(val, index, dtype=dtype, copy=False) + com.require_length_match(val, index) + refs.append(None) + + homogenized.append(val) + + return homogenized, refs + + +def _extract_index(data) -> Index: + """ + Try to infer an Index from the passed data, raise ValueError on failure. + """ + index: Index + if len(data) == 0: + return default_index(0) + + raw_lengths = [] + indexes: list[list[Hashable] | Index] = [] + + have_raw_arrays = False + have_series = False + have_dicts = False + + for val in data: + if isinstance(val, ABCSeries): + have_series = True + indexes.append(val.index) + elif isinstance(val, dict): + have_dicts = True + indexes.append(list(val.keys())) + elif is_list_like(val) and getattr(val, "ndim", 1) == 1: + have_raw_arrays = True + raw_lengths.append(len(val)) + elif isinstance(val, np.ndarray) and val.ndim > 1: + raise ValueError("Per-column arrays must each be 1-dimensional") + + if not indexes and not raw_lengths: + raise ValueError("If using all scalar values, you must pass an index") + + if have_series: + index = union_indexes(indexes) + elif have_dicts: + index = union_indexes(indexes, sort=False) + + if have_raw_arrays: + lengths = list(set(raw_lengths)) + if len(lengths) > 1: + raise ValueError("All arrays must be of the same length") + + if have_dicts: + raise ValueError( + "Mixing dicts with non-Series may lead to ambiguous ordering." + ) + + if have_series: + if lengths[0] != len(index): + msg = ( + f"array length {lengths[0]} does not match index " + f"length {len(index)}" + ) + raise ValueError(msg) + else: + index = default_index(lengths[0]) + + return ensure_index(index) + + +def reorder_arrays( + arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int +) -> tuple[list[ArrayLike], Index]: + """ + Pre-emptively (cheaply) reindex arrays with new columns. 
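# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition): _homogenize() above reindexes each
# Series to the shared index (the union of the inputs), which is why
# mismatched Series align, with NaN for the gaps, when building a frame from
# a dict of Series.
import pandas as pd

a = pd.Series([1, 2], index=["x", "y"])
b = pd.Series([10, 20], index=["y", "z"])
print(pd.DataFrame({"a": a, "b": b}))
#       a     b
# x   1.0   NaN
# y   2.0  10.0
# z   NaN  20.0
# ---------------------------------------------------------------------------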
+ """ + # reorder according to the columns + if columns is not None: + if not columns.equals(arr_columns): + # if they are equal, there is nothing to do + new_arrays: list[ArrayLike] = [] + indexer = arr_columns.get_indexer(columns) + for i, k in enumerate(indexer): + if k == -1: + # by convention default is all-NaN object dtype + arr = np.empty(length, dtype=object) + arr.fill(np.nan) + else: + arr = arrays[k] + new_arrays.append(arr) + + arrays = new_arrays + arr_columns = columns + + return arrays, arr_columns + + +def _get_names_from_index(data) -> Index: + has_some_name = any(getattr(s, "name", None) is not None for s in data) + if not has_some_name: + return default_index(len(data)) + + index: list[Hashable] = list(range(len(data))) + count = 0 + for i, s in enumerate(data): + n = getattr(s, "name", None) + if n is not None: + index[i] = n + else: + index[i] = f"Unnamed {count}" + count += 1 + + return Index(index) + + +def _get_axes( + N: int, K: int, index: Index | None, columns: Index | None +) -> tuple[Index, Index]: + # helper to create the axes as indexes + # return axes or defaults + + if index is None: + index = default_index(N) + else: + index = ensure_index(index) + + if columns is None: + columns = default_index(K) + else: + columns = ensure_index(columns) + return index, columns + + +def dataclasses_to_dicts(data): + """ + Converts a list of dataclass instances to a list of dictionaries. + + Parameters + ---------- + data : List[Type[dataclass]] + + Returns + -------- + list_dict : List[dict] + + Examples + -------- + >>> from dataclasses import dataclass + >>> @dataclass + ... class Point: + ... x: int + ... y: int + + >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)]) + [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}] + + """ + from dataclasses import asdict + + return list(map(asdict, data)) + + +# --------------------------------------------------------------------- +# Conversion of Inputs to Arrays + + +def to_arrays( + data, columns: Index | None, dtype: DtypeObj | None = None +) -> tuple[list[ArrayLike], Index]: + """ + Return list of arrays, columns. + + Returns + ------- + list[ArrayLike] + These will become columns in a DataFrame. + Index + This will become frame.columns. + + Notes + ----- + Ensures that len(result_arrays) == len(result_index). + """ + + if not len(data): + if isinstance(data, np.ndarray): + if data.dtype.names is not None: + # i.e. numpy structured array + columns = ensure_index(data.dtype.names) + arrays = [data[name] for name in columns] + + if len(data) == 0: + # GH#42456 the indexing above results in list of 2D ndarrays + # TODO: is that an issue with numpy? + for i, arr in enumerate(arrays): + if arr.ndim == 2: + arrays[i] = arr[:, 0] + + return arrays, columns + return [], ensure_index([]) + + elif isinstance(data, np.ndarray) and data.dtype.names is not None: + # e.g. 
recarray + columns = Index(list(data.dtype.names)) + arrays = [data[k] for k in columns] + return arrays, columns + + if isinstance(data[0], (list, tuple)): + arr = _list_to_arrays(data) + elif isinstance(data[0], abc.Mapping): + arr, columns = _list_of_dict_to_arrays(data, columns) + elif isinstance(data[0], ABCSeries): + arr, columns = _list_of_series_to_arrays(data, columns) + else: + # last ditch effort + data = [tuple(x) for x in data] + arr = _list_to_arrays(data) + + content, columns = _finalize_columns_and_data(arr, columns, dtype) + return content, columns + + +def _list_to_arrays(data: list[tuple | list]) -> np.ndarray: + # Returned np.ndarray has ndim = 2 + # Note: we already check len(data) > 0 before getting hre + if isinstance(data[0], tuple): + content = lib.to_object_array_tuples(data) + else: + # list of lists + content = lib.to_object_array(data) + return content + + +def _list_of_series_to_arrays( + data: list, + columns: Index | None, +) -> tuple[np.ndarray, Index]: + # returned np.ndarray has ndim == 2 + + if columns is None: + # We know pass_data is non-empty because data[0] is a Series + pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))] + columns = get_objs_combined_axis(pass_data, sort=False) + + indexer_cache: dict[int, np.ndarray] = {} + + aligned_values = [] + for s in data: + index = getattr(s, "index", None) + if index is None: + index = default_index(len(s)) + + if id(index) in indexer_cache: + indexer = indexer_cache[id(index)] + else: + indexer = indexer_cache[id(index)] = index.get_indexer(columns) + + values = extract_array(s, extract_numpy=True) + aligned_values.append(algorithms.take_nd(values, indexer)) + + content = np.vstack(aligned_values) + return content, columns + + +def _list_of_dict_to_arrays( + data: list[dict], + columns: Index | None, +) -> tuple[np.ndarray, Index]: + """ + Convert list of dicts to numpy arrays + + if `columns` is not passed, column names are inferred from the records + - for OrderedDict and dicts, the column names match + the key insertion-order from the first record to the last. + - For other kinds of dict-likes, the keys are lexically sorted. + + Parameters + ---------- + data : iterable + collection of records (OrderedDict, dict) + columns: iterables or None + + Returns + ------- + content : np.ndarray[object, ndim=2] + columns : Index + """ + if columns is None: + gen = (list(x.keys()) for x in data) + sort = not any(isinstance(d, dict) for d in data) + pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort) + columns = ensure_index(pre_cols) + + # assure that they are of the base dict class and not of derived + # classes + data = [d if type(d) is dict else dict(d) for d in data] # noqa: E721 + + content = lib.dicts_to_array(data, list(columns)) + return content, columns + + +def _finalize_columns_and_data( + content: np.ndarray, # ndim == 2 + columns: Index | None, + dtype: DtypeObj | None, +) -> tuple[list[ArrayLike], Index]: + """ + Ensure we have valid columns, cast object dtypes if possible. 
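# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition): _list_of_dict_to_arrays() above
# infers columns from key insertion order across the records; keys missing
# from a record become NaN.
import pandas as pd

records = [{"a": 1, "b": 2}, {"b": 3, "c": 4}]
df = pd.DataFrame(records)
print(df.columns.tolist())  # ['a', 'b', 'c']
print(df)                   # missing entries are NaN
# ---------------------------------------------------------------------------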
+ """ + contents = list(content.T) + + try: + columns = _validate_or_indexify_columns(contents, columns) + except AssertionError as err: + # GH#26429 do not raise user-facing AssertionError + raise ValueError(err) from err + + if len(contents) and contents[0].dtype == np.object_: + contents = convert_object_array(contents, dtype=dtype) + + return contents, columns + + +def _validate_or_indexify_columns( + content: list[np.ndarray], columns: Index | None +) -> Index: + """ + If columns is None, make numbers as column names; Otherwise, validate that + columns have valid length. + + Parameters + ---------- + content : list of np.ndarrays + columns : Index or None + + Returns + ------- + Index + If columns is None, assign positional column index value as columns. + + Raises + ------ + 1. AssertionError when content is not composed of list of lists, and if + length of columns is not equal to length of content. + 2. ValueError when content is list of lists, but length of each sub-list + is not equal + 3. ValueError when content is list of lists, but length of sub-list is + not equal to length of content + """ + if columns is None: + columns = default_index(len(content)) + else: + # Add mask for data which is composed of list of lists + is_mi_list = isinstance(columns, list) and all( + isinstance(col, list) for col in columns + ) + + if not is_mi_list and len(columns) != len(content): # pragma: no cover + # caller's responsibility to check for this... + raise AssertionError( + f"{len(columns)} columns passed, passed data had " + f"{len(content)} columns" + ) + if is_mi_list: + # check if nested list column, length of each sub-list should be equal + if len({len(col) for col in columns}) > 1: + raise ValueError( + "Length of columns passed for MultiIndex columns is different" + ) + + # if columns is not empty and length of sublist is not equal to content + if columns and len(columns[0]) != len(content): + raise ValueError( + f"{len(columns[0])} columns passed, passed data had " + f"{len(content)} columns" + ) + return columns + + +def convert_object_array( + content: list[npt.NDArray[np.object_]], + dtype: DtypeObj | None, + dtype_backend: str = "numpy", + coerce_float: bool = False, +) -> list[ArrayLike]: + """ + Internal function to convert object array. + + Parameters + ---------- + content: List[np.ndarray] + dtype: np.dtype or ExtensionDtype + dtype_backend: Controls if nullable/pyarrow dtypes are returned. + coerce_float: Cast floats that are integers to int. + + Returns + ------- + List[ArrayLike] + """ + # provide soft conversion of object dtypes + + def convert(arr): + if dtype != np.dtype("O"): + arr = lib.maybe_convert_objects( + arr, + try_float=coerce_float, + convert_to_nullable_dtype=dtype_backend != "numpy", + ) + # Notes on cases that get here 2023-02-15 + # 1) we DO get here when arr is all Timestamps and dtype=None + # 2) disabling this doesn't break the world, so this must be + # getting caught at a higher level + # 3) passing convert_non_numeric to maybe_convert_objects get this right + # 4) convert_non_numeric? + + if dtype is None: + if arr.dtype == np.dtype("O"): + # i.e. 
maybe_convert_objects didn't convert + arr = maybe_infer_to_datetimelike(arr) + if dtype_backend != "numpy" and arr.dtype == np.dtype("O"): + new_dtype = StringDtype() + arr_cls = new_dtype.construct_array_type() + arr = arr_cls._from_sequence(arr, dtype=new_dtype) + elif dtype_backend != "numpy" and isinstance(arr, np.ndarray): + if arr.dtype.kind in "iufb": + arr = pd_array(arr, copy=False) + + elif isinstance(dtype, ExtensionDtype): + # TODO: test(s) that get here + # TODO: try to de-duplicate this convert function with + # core.construction functions + cls = dtype.construct_array_type() + arr = cls._from_sequence(arr, dtype=dtype, copy=False) + elif dtype.kind in "mM": + # This restriction is harmless bc these are the only cases + # where maybe_cast_to_datetime is not a no-op. + # Here we know: + # 1) dtype.kind in "mM" and + # 2) arr is either object or numeric dtype + arr = maybe_cast_to_datetime(arr, dtype) + + return arr + + arrays = [convert(arr) for arr in content] + + return arrays diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/managers.py b/venv/lib/python3.10/site-packages/pandas/core/internals/managers.py new file mode 100644 index 0000000000000000000000000000000000000000..2e0e04717373fbe80490990d929c35130a7c733a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/internals/managers.py @@ -0,0 +1,2375 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Sequence, +) +import itertools +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + cast, +) +import warnings + +import numpy as np + +from pandas._config import ( + using_copy_on_write, + warn_copy_on_write, +) + +from pandas._libs import ( + internals as libinternals, + lib, +) +from pandas._libs.internals import ( + BlockPlacement, + BlockValuesRefs, +) +from pandas._libs.tslibs import Timestamp +from pandas.errors import PerformanceWarning +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import infer_dtype_from_scalar +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_1d_only_ea_dtype, + is_list_like, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + array_equals, + isna, +) + +import pandas.core.algorithms as algos +from pandas.core.arrays import ( + ArrowExtensionArray, + ArrowStringArray, + DatetimeArray, +) +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import maybe_convert_indices +from pandas.core.indexes.api import ( + Index, + ensure_index, +) +from pandas.core.internals.base import ( + DataManager, + SingleDataManager, + ensure_np_dtype, + interleaved_dtype, +) +from pandas.core.internals.blocks import ( + COW_WARNING_GENERAL_MSG, + COW_WARNING_SETITEM_MSG, + Block, + NumpyBlock, + ensure_block_shape, + extend_blocks, + get_block_type, + maybe_coerce_values, + new_block, + new_block_2d, +) +from pandas.core.internals.ops import ( + blockwise_all, + operate_blockwise, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + QuantileInterpolation, + Self, + Shape, + npt, + ) + + from pandas.api.extensions import ExtensionArray + + +class BaseBlockManager(DataManager): + """ + Core internal data structure to 
implement DataFrame, Series, etc. + + Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a + lightweight blocked set of labeled data to be manipulated by the DataFrame + public API class + + Attributes + ---------- + shape + ndim + axes + values + items + + Methods + ------- + set_axis(axis, new_labels) + copy(deep=True) + + get_dtypes + + apply(func, axes, block_filter_fn) + + get_bool_data + get_numeric_data + + get_slice(slice_like, axis) + get(label) + iget(loc) + + take(indexer, axis) + reindex_axis(new_labels, axis) + reindex_indexer(new_labels, indexer, axis) + + delete(label) + insert(loc, label, value) + set(label, value) + + Parameters + ---------- + blocks: Sequence of Block + axes: Sequence of Index + verify_integrity: bool, default True + + Notes + ----- + This is *not* a public API class + """ + + __slots__ = () + + _blknos: npt.NDArray[np.intp] + _blklocs: npt.NDArray[np.intp] + blocks: tuple[Block, ...] + axes: list[Index] + + @property + def ndim(self) -> int: + raise NotImplementedError + + _known_consolidated: bool + _is_consolidated: bool + + def __init__(self, blocks, axes, verify_integrity: bool = True) -> None: + raise NotImplementedError + + @classmethod + def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self: + raise NotImplementedError + + @property + def blknos(self) -> npt.NDArray[np.intp]: + """ + Suppose we want to find the array corresponding to our i'th column. + + blknos[i] identifies the block from self.blocks that contains this column. + + blklocs[i] identifies the column of interest within + self.blocks[self.blknos[i]] + """ + if self._blknos is None: + # Note: these can be altered by other BlockManager methods. + self._rebuild_blknos_and_blklocs() + + return self._blknos + + @property + def blklocs(self) -> npt.NDArray[np.intp]: + """ + See blknos.__doc__ + """ + if self._blklocs is None: + # Note: these can be altered by other BlockManager methods. + self._rebuild_blknos_and_blklocs() + + return self._blklocs + + def make_empty(self, axes=None) -> Self: + """return an empty BlockManager with the items axis of len 0""" + if axes is None: + axes = [Index([])] + self.axes[1:] + + # preserve dtype if possible + if self.ndim == 1: + assert isinstance(self, SingleBlockManager) # for mypy + blk = self.blocks[0] + arr = blk.values[:0] + bp = BlockPlacement(slice(0, 0)) + nb = blk.make_block_same_class(arr, placement=bp) + blocks = [nb] + else: + blocks = [] + return type(self).from_blocks(blocks, axes) + + def __nonzero__(self) -> bool: + return True + + # Python3 compat + __bool__ = __nonzero__ + + def _normalize_axis(self, axis: AxisInt) -> int: + # switch axis to follow BlockManager logic + if self.ndim == 2: + axis = 1 if axis == 0 else 0 + return axis + + def set_axis(self, axis: AxisInt, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. + self._validate_set_axis(axis, new_labels) + self.axes[axis] = new_labels + + @property + def is_single_block(self) -> bool: + # Assumes we are 2D; overridden by SingleBlockManager + return len(self.blocks) == 1 + + @property + def items(self) -> Index: + return self.axes[0] + + def _has_no_reference(self, i: int) -> bool: + """ + Check for column `i` if it has references. + (whether it references another array or is itself being referenced) + Returns True if the column has no references. 
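A hedged illustration of the blknos/blklocs mapping described above. It peeks at the internal _mgr attribute, which is not public API and may behave differently under the (deprecated) ArrayManager or in future versions.

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [1.5, 2.5], "c": [3, 4]})
mgr = df._mgr  # typically a BlockManager
for i, name in enumerate(df.columns):
    blk = mgr.blocks[mgr.blknos[i]]   # block that holds column i
    row = blk.values[mgr.blklocs[i]]  # that column's position within the block
    assert np.array_equal(np.asarray(row), df[name].to_numpy())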
+ """ + blkno = self.blknos[i] + return self._has_no_reference_block(blkno) + + def _has_no_reference_block(self, blkno: int) -> bool: + """ + Check for block `i` if it has references. + (whether it references another array or is itself being referenced) + Returns True if the block has no references. + """ + return not self.blocks[blkno].refs.has_reference() + + def add_references(self, mgr: BaseBlockManager) -> None: + """ + Adds the references from one manager to another. We assume that both + managers have the same block structure. + """ + if len(self.blocks) != len(mgr.blocks): + # If block structure changes, then we made a copy + return + for i, blk in enumerate(self.blocks): + blk.refs = mgr.blocks[i].refs + blk.refs.add_reference(blk) + + def references_same_values(self, mgr: BaseBlockManager, blkno: int) -> bool: + """ + Checks if two blocks from two different block managers reference the + same underlying values. + """ + blk = self.blocks[blkno] + return any(blk is ref() for ref in mgr.blocks[blkno].refs.referenced_blocks) + + def get_dtypes(self) -> npt.NDArray[np.object_]: + dtypes = np.array([blk.dtype for blk in self.blocks], dtype=object) + return dtypes.take(self.blknos) + + @property + def arrays(self) -> list[ArrayLike]: + """ + Quick access to the backing arrays of the Blocks. + + Only for compatibility with ArrayManager for testing convenience. + Not to be used in actual code, and return value is not the same as the + ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs). + + Warning! The returned arrays don't handle Copy-on-Write, so this should + be used with caution (only in read-mode). + """ + return [blk.values for blk in self.blocks] + + def __repr__(self) -> str: + output = type(self).__name__ + for i, ax in enumerate(self.axes): + if i == 0: + output += f"\nItems: {ax}" + else: + output += f"\nAxis {i}: {ax}" + + for block in self.blocks: + output += f"\n{block}" + return output + + def apply( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + """ + Iterate over the blocks, collect and create a new BlockManager. + + Parameters + ---------- + f : str or callable + Name of the Block method to apply. + align_keys: List[str] or None, default None + **kwargs + Keywords to pass to `f` + + Returns + ------- + BlockManager + """ + assert "filter" not in kwargs + + align_keys = align_keys or [] + result_blocks: list[Block] = [] + # fillna: Series/DataFrame is responsible for making sure value is aligned + + aligned_args = {k: kwargs[k] for k in align_keys} + + for b in self.blocks: + if aligned_args: + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values + else: + kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values + else: + # otherwise we have an ndarray + kwargs[k] = obj[b.mgr_locs.indexer] + + if callable(f): + applied = b.apply(f, **kwargs) + else: + applied = getattr(b, f)(**kwargs) + result_blocks = extend_blocks(applied, result_blocks) + + out = type(self).from_blocks(result_blocks, self.axes) + return out + + # Alias so we can share code with ArrayManager + apply_with_block = apply + + def setitem(self, indexer, value, warn: bool = True) -> Self: + """ + Set values with indexer. 
+ + For SingleBlockManager, this backs s[indexer] = value + """ + if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim: + raise ValueError(f"Cannot set values with ndim > {self.ndim}") + + if warn and warn_copy_on_write() and not self._has_no_reference(0): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + + elif using_copy_on_write() and not self._has_no_reference(0): + # this method is only called if there is a single block -> hardcoded 0 + # Split blocks to only copy the columns we want to modify + if self.ndim == 2 and isinstance(indexer, tuple): + blk_loc = self.blklocs[indexer[1]] + if is_list_like(blk_loc) and blk_loc.ndim == 2: + blk_loc = np.squeeze(blk_loc, axis=0) + elif not is_list_like(blk_loc): + # Keep dimension and copy data later + blk_loc = [blk_loc] # type: ignore[assignment] + if len(blk_loc) == 0: + return self.copy(deep=False) + + values = self.blocks[0].values + if values.ndim == 2: + values = values[blk_loc] + # "T" has no attribute "_iset_split_block" + self._iset_split_block( # type: ignore[attr-defined] + 0, blk_loc, values + ) + # first block equals values + self.blocks[0].setitem((indexer[0], np.arange(len(blk_loc))), value) + return self + # No need to split if we either set all columns or on a single block + # manager + self = self.copy() + + return self.apply("setitem", indexer=indexer, value=value) + + def diff(self, n: int) -> Self: + # only reached with self.ndim == 2 + return self.apply("diff", n=n) + + def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self: + if copy is None: + if using_copy_on_write(): + copy = False + else: + copy = True + elif using_copy_on_write(): + copy = False + + return self.apply( + "astype", + dtype=dtype, + copy=copy, + errors=errors, + using_cow=using_copy_on_write(), + ) + + def convert(self, copy: bool | None) -> Self: + if copy is None: + if using_copy_on_write(): + copy = False + else: + copy = True + elif using_copy_on_write(): + copy = False + + return self.apply("convert", copy=copy, using_cow=using_copy_on_write()) + + def convert_dtypes(self, **kwargs): + if using_copy_on_write(): + copy = False + else: + copy = True + + return self.apply( + "convert_dtypes", copy=copy, using_cow=using_copy_on_write(), **kwargs + ) + + def get_values_for_csv( + self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None + ) -> Self: + """ + Convert values to native types (strings / python objects) that are used + in formatting (repr / csv). + """ + return self.apply( + "get_values_for_csv", + na_rep=na_rep, + quoting=quoting, + float_format=float_format, + date_format=date_format, + decimal=decimal, + ) + + @property + def any_extension_types(self) -> bool: + """Whether any of the blocks in this manager are extension blocks""" + return any(block.is_extension for block in self.blocks) + + @property + def is_view(self) -> bool: + """return a boolean if we are a single block and are a view""" + if len(self.blocks) == 1: + return self.blocks[0].is_view + + # It is technically possible to figure out which blocks are views + # e.g. [ b.values.base is not None for b in self.blocks ] + # but then we have the case of possibly some blocks being a view + # and some blocks not. setting in theory is possible on the non-view + # blocks w/o causing a SettingWithCopy raise/warn. 
But this is a bit + # complicated + + return False + + def _get_data_subset(self, predicate: Callable) -> Self: + blocks = [blk for blk in self.blocks if predicate(blk.values)] + return self._combine(blocks) + + def get_bool_data(self) -> Self: + """ + Select blocks that are bool-dtype and columns from object-dtype blocks + that are all-bool. + """ + + new_blocks = [] + + for blk in self.blocks: + if blk.dtype == bool: + new_blocks.append(blk) + + elif blk.is_object: + nbs = blk._split() + new_blocks.extend(nb for nb in nbs if nb.is_bool) + + return self._combine(new_blocks) + + def get_numeric_data(self) -> Self: + numeric_blocks = [blk for blk in self.blocks if blk.is_numeric] + if len(numeric_blocks) == len(self.blocks): + # Avoid somewhat expensive _combine + return self + return self._combine(numeric_blocks) + + def _combine(self, blocks: list[Block], index: Index | None = None) -> Self: + """return a new manager with the blocks""" + if len(blocks) == 0: + if self.ndim == 2: + # retain our own Index dtype + if index is not None: + axes = [self.items[:0], index] + else: + axes = [self.items[:0]] + self.axes[1:] + return self.make_empty(axes) + return self.make_empty() + + # FIXME: optimization potential + indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) + inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) + + new_blocks: list[Block] = [] + for b in blocks: + nb = b.copy(deep=False) + nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer]) + new_blocks.append(nb) + + axes = list(self.axes) + if index is not None: + axes[-1] = index + axes[0] = self.items.take(indexer) + + return type(self).from_blocks(new_blocks, axes) + + @property + def nblocks(self) -> int: + return len(self.blocks) + + def copy(self, deep: bool | None | Literal["all"] = True) -> Self: + """ + Make deep or shallow copy of BlockManager + + Parameters + ---------- + deep : bool, string or None, default True + If False or None, return a shallow copy (do not copy data) + If 'all', copy data and a deep copy of the index + + Returns + ------- + BlockManager + """ + if deep is None: + if using_copy_on_write(): + # use shallow copy + deep = False + else: + # preserve deep copy for BlockManager with copy=None + deep = True + + # this preserves the notion of view copying of axes + if deep: + # hit in e.g. 
tests.io.json.test_pandas + + def copy_func(ax): + return ax.copy(deep=True) if deep == "all" else ax.view() + + new_axes = [copy_func(ax) for ax in self.axes] + else: + if using_copy_on_write(): + new_axes = [ax.view() for ax in self.axes] + else: + new_axes = list(self.axes) + + res = self.apply("copy", deep=deep) + res.axes = new_axes + + if self.ndim > 1: + # Avoid needing to re-compute these + blknos = self._blknos + if blknos is not None: + res._blknos = blknos.copy() + res._blklocs = self._blklocs.copy() + + if deep: + res._consolidate_inplace() + return res + + def consolidate(self) -> Self: + """ + Join together blocks having same dtype + + Returns + ------- + y : BlockManager + """ + if self.is_consolidated(): + return self + + bm = type(self)(self.blocks, self.axes, verify_integrity=False) + bm._is_consolidated = False + bm._consolidate_inplace() + return bm + + def reindex_indexer( + self, + new_axis: Index, + indexer: npt.NDArray[np.intp] | None, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool | None = True, + only_slice: bool = False, + *, + use_na_proxy: bool = False, + ) -> Self: + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray[intp] or None + axis : int + fill_value : object, default None + allow_dups : bool, default False + copy : bool or None, default True + If None, regard as False to get shallow copy. + only_slice : bool, default False + Whether to take views, not copies, along columns. + use_na_proxy : bool, default False + Whether to use a np.void ndarray for newly introduced columns. + + pandas-indexer with -1's only. + """ + if copy is None: + if using_copy_on_write(): + # use shallow copy + copy = False + else: + # preserve deep copy for BlockManager with copy=None + copy = True + + if indexer is None: + if new_axis is self.axes[axis] and not copy: + return self + + result = self.copy(deep=copy) + result.axes = list(self.axes) + result.axes[axis] = new_axis + return result + + # Should be intp, but in some cases we get int64 on 32bit builds + assert isinstance(indexer, np.ndarray) + + # some axes don't allow reindexing with dups + if not allow_dups: + self.axes[axis]._validate_can_reindex(indexer) + + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 0: + new_blocks = self._slice_take_blocks_ax0( + indexer, + fill_value=fill_value, + only_slice=only_slice, + use_na_proxy=use_na_proxy, + ) + else: + new_blocks = [ + blk.take_nd( + indexer, + axis=1, + fill_value=( + fill_value if fill_value is not None else blk.fill_value + ), + ) + for blk in self.blocks + ] + + new_axes = list(self.axes) + new_axes[axis] = new_axis + + new_mgr = type(self).from_blocks(new_blocks, new_axes) + if axis == 1: + # We can avoid the need to rebuild these + new_mgr._blknos = self.blknos.copy() + new_mgr._blklocs = self.blklocs.copy() + return new_mgr + + def _slice_take_blocks_ax0( + self, + slice_or_indexer: slice | np.ndarray, + fill_value=lib.no_default, + only_slice: bool = False, + *, + use_na_proxy: bool = False, + ref_inplace_op: bool = False, + ) -> list[Block]: + """ + Slice/take blocks along axis=0. + + Overloaded for SingleBlock + + Parameters + ---------- + slice_or_indexer : slice or np.ndarray[int64] + fill_value : scalar, default lib.no_default + only_slice : bool, default False + If True, we always return views on existing arrays, never copies. + This is used when called from ops.blockwise.operate_blockwise. 
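As a small sketch of the "pandas-indexer with -1's only" convention used by reindex_indexer: the positional indexer is typically produced by Index.get_indexer, with -1 marking labels absent from the old axis, which are later filled with fill_value.

import pandas as pd

old = pd.Index(["a", "b", "c"])
new = pd.Index(["b", "d", "a"])
print(old.get_indexer(new))   # [ 1 -1  0]

df = pd.DataFrame({"x": [1.0, 2.0, 3.0]}, index=old)
print(df.reindex(new))        # row 'd' is newly introduced as NaN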
+ use_na_proxy : bool, default False + Whether to use a np.void ndarray for newly introduced columns. + ref_inplace_op: bool, default False + Don't track refs if True because we operate inplace + + Returns + ------- + new_blocks : list of Block + """ + allow_fill = fill_value is not lib.no_default + + sl_type, slobj, sllen = _preprocess_slice_or_indexer( + slice_or_indexer, self.shape[0], allow_fill=allow_fill + ) + + if self.is_single_block: + blk = self.blocks[0] + + if sl_type == "slice": + # GH#32959 EABlock would fail since we can't make 0-width + # TODO(EA2D): special casing unnecessary with 2D EAs + if sllen == 0: + return [] + bp = BlockPlacement(slice(0, sllen)) + return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)] + elif not allow_fill or self.ndim == 1: + if allow_fill and fill_value is None: + fill_value = blk.fill_value + + if not allow_fill and only_slice: + # GH#33597 slice instead of take, so we get + # views instead of copies + blocks = [ + blk.getitem_block_columns( + slice(ml, ml + 1), + new_mgr_locs=BlockPlacement(i), + ref_inplace_op=ref_inplace_op, + ) + for i, ml in enumerate(slobj) + ] + return blocks + else: + bp = BlockPlacement(slice(0, sllen)) + return [ + blk.take_nd( + slobj, + axis=0, + new_mgr_locs=bp, + fill_value=fill_value, + ) + ] + + if sl_type == "slice": + blknos = self.blknos[slobj] + blklocs = self.blklocs[slobj] + else: + blknos = algos.take_nd( + self.blknos, slobj, fill_value=-1, allow_fill=allow_fill + ) + blklocs = algos.take_nd( + self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill + ) + + # When filling blknos, make sure blknos is updated before appending to + # blocks list, that way new blkno is exactly len(blocks). + blocks = [] + group = not only_slice + for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group): + if blkno == -1: + # If we've got here, fill_value was not lib.no_default + + blocks.append( + self._make_na_block( + placement=mgr_locs, + fill_value=fill_value, + use_na_proxy=use_na_proxy, + ) + ) + else: + blk = self.blocks[blkno] + + # Otherwise, slicing along items axis is necessary. + if not blk._can_consolidate and not blk._validate_ndim: + # i.e. we dont go through here for DatetimeTZBlock + # A non-consolidatable block, it's easy, because there's + # only one item and each mgr loc is a copy of that single + # item. 
+ deep = not (only_slice or using_copy_on_write()) + for mgr_loc in mgr_locs: + newblk = blk.copy(deep=deep) + newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1)) + blocks.append(newblk) + + else: + # GH#32779 to avoid the performance penalty of copying, + # we may try to only slice + taker = blklocs[mgr_locs.indexer] + max_len = max(len(mgr_locs), taker.max() + 1) + if only_slice or using_copy_on_write(): + taker = lib.maybe_indices_to_slice(taker, max_len) + + if isinstance(taker, slice): + nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs) + blocks.append(nb) + elif only_slice: + # GH#33597 slice instead of take, so we get + # views instead of copies + for i, ml in zip(taker, mgr_locs): + slc = slice(i, i + 1) + bp = BlockPlacement(ml) + nb = blk.getitem_block_columns(slc, new_mgr_locs=bp) + # We have np.shares_memory(nb.values, blk.values) + blocks.append(nb) + else: + nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs) + blocks.append(nb) + + return blocks + + def _make_na_block( + self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool = False + ) -> Block: + # Note: we only get here with self.ndim == 2 + + if use_na_proxy: + assert fill_value is None + shape = (len(placement), self.shape[1]) + vals = np.empty(shape, dtype=np.void) + nb = NumpyBlock(vals, placement, ndim=2) + return nb + + if fill_value is None: + fill_value = np.nan + + shape = (len(placement), self.shape[1]) + + dtype, fill_value = infer_dtype_from_scalar(fill_value) + block_values = make_na_array(dtype, shape, fill_value) + return new_block_2d(block_values, placement=placement) + + def take( + self, + indexer: npt.NDArray[np.intp], + axis: AxisInt = 1, + verify: bool = True, + ) -> Self: + """ + Take items along any axis. + + indexer : np.ndarray[np.intp] + axis : int, default 1 + verify : bool, default True + Check that all entries are between 0 and len(self) - 1, inclusive. + Pass verify=False if this check has been done by the caller. + + Returns + ------- + BlockManager + """ + # Caller is responsible for ensuring indexer annotation is accurate + + n = self.shape[axis] + indexer = maybe_convert_indices(indexer, n, verify=verify) + + new_labels = self.axes[axis].take(indexer) + return self.reindex_indexer( + new_axis=new_labels, + indexer=indexer, + axis=axis, + allow_dups=True, + copy=None, + ) + + +class BlockManager(libinternals.BlockManager, BaseBlockManager): + """ + BaseBlockManager that holds 2D blocks. + """ + + ndim = 2 + + # ---------------------------------------------------------------- + # Constructors + + def __init__( + self, + blocks: Sequence[Block], + axes: Sequence[Index], + verify_integrity: bool = True, + ) -> None: + if verify_integrity: + # Assertion disabled for performance + # assert all(isinstance(x, Index) for x in axes) + + for block in blocks: + if self.ndim != block.ndim: + raise AssertionError( + f"Number of Block dimensions ({block.ndim}) must equal " + f"number of axes ({self.ndim})" + ) + # As of 2.0, the caller is responsible for ensuring that + # DatetimeTZBlock with block.ndim == 2 has block.values.ndim ==2; + # previously there was a special check for fastparquet compat. 
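A hedged, user-level illustration of take. Note that DataFrame.take uses the user-facing axis convention (0 = rows), which the manager swaps internally (see _normalize_axis above).

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
print(df.take([2, 0], axis=0))   # rows 2 and 0, in that order
print(df.take([1], axis=1))      # just column 'b'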
+ + self._verify_integrity() + + def _verify_integrity(self) -> None: + mgr_shape = self.shape + tot_items = sum(len(x.mgr_locs) for x in self.blocks) + for block in self.blocks: + if block.shape[1:] != mgr_shape[1:]: + raise_construction_error(tot_items, block.shape[1:], self.axes) + if len(self.items) != tot_items: + raise AssertionError( + "Number of manager items must equal union of " + f"block items\n# manager items: {len(self.items)}, # " + f"tot_items: {tot_items}" + ) + + @classmethod + def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self: + """ + Constructor for BlockManager and SingleBlockManager with same signature. + """ + return cls(blocks, axes, verify_integrity=False) + + # ---------------------------------------------------------------- + # Indexing + + def fast_xs(self, loc: int) -> SingleBlockManager: + """ + Return the array corresponding to `frame.iloc[loc]`. + + Parameters + ---------- + loc : int + + Returns + ------- + np.ndarray or ExtensionArray + """ + if len(self.blocks) == 1: + # TODO: this could be wrong if blk.mgr_locs is not slice(None)-like; + # is this ruled out in the general case? + result = self.blocks[0].iget((slice(None), loc)) + # in the case of a single block, the new block is a view + bp = BlockPlacement(slice(0, len(result))) + block = new_block( + result, + placement=bp, + ndim=1, + refs=self.blocks[0].refs, + ) + return SingleBlockManager(block, self.axes[0]) + + dtype = interleaved_dtype([blk.dtype for blk in self.blocks]) + + n = len(self) + + if isinstance(dtype, ExtensionDtype): + # TODO: use object dtype as workaround for non-performant + # EA.__setitem__ methods. (primarily ArrowExtensionArray.__setitem__ + # when iteratively setting individual values) + # https://github.com/pandas-dev/pandas/pull/54508#issuecomment-1675827918 + result = np.empty(n, dtype=object) + else: + result = np.empty(n, dtype=dtype) + result = ensure_wrapped_if_datetimelike(result) + + for blk in self.blocks: + # Such assignment may incorrectly coerce NaT to None + # result[blk.mgr_locs] = blk._slice((slice(None), loc)) + for i, rl in enumerate(blk.mgr_locs): + result[rl] = blk.iget((i, loc)) + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + result = cls._from_sequence(result, dtype=dtype) + + bp = BlockPlacement(slice(0, len(result))) + block = new_block(result, placement=bp, ndim=1) + return SingleBlockManager(block, self.axes[0]) + + def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager: + """ + Return the data as a SingleBlockManager. + """ + block = self.blocks[self.blknos[i]] + values = block.iget(self.blklocs[i]) + + # shortcut for select a single-dim from a 2-dim BM + bp = BlockPlacement(slice(0, len(values))) + nb = type(block)( + values, placement=bp, ndim=1, refs=block.refs if track_ref else None + ) + return SingleBlockManager(nb, self.axes[1]) + + def iget_values(self, i: int) -> ArrayLike: + """ + Return the data for column i as the values (ndarray or ExtensionArray). + + Warning! The returned array is a view but doesn't handle Copy-on-Write, + so this should be used with caution. + """ + # TODO(CoW) making the arrays read-only might make this safer to use? + block = self.blocks[self.blknos[i]] + values = block.iget(self.blklocs[i]) + return values + + @property + def column_arrays(self) -> list[np.ndarray]: + """ + Used in the JSON C code to access column arrays. + This optimizes compared to using `iget_values` by converting each + + Warning! 
This doesn't handle Copy-on-Write, so should be used with + caution (current use case of consuming this in the JSON code is fine). + """ + # This is an optimized equivalent to + # result = [self.iget_values(i) for i in range(len(self.items))] + result: list[np.ndarray | None] = [None] * len(self.items) + + for blk in self.blocks: + mgr_locs = blk._mgr_locs + values = blk.array_values._values_for_json() + if values.ndim == 1: + # TODO(EA2D): special casing not needed with 2D EAs + result[mgr_locs[0]] = values + + else: + for i, loc in enumerate(mgr_locs): + result[loc] = values[i] + + # error: Incompatible return value type (got "List[None]", + # expected "List[ndarray[Any, Any]]") + return result # type: ignore[return-value] + + def iset( + self, + loc: int | slice | np.ndarray, + value: ArrayLike, + inplace: bool = False, + refs: BlockValuesRefs | None = None, + ) -> None: + """ + Set new item in-place. Does not consolidate. Adds new Block if not + contained in the current set of items + """ + + # FIXME: refactor, clearly separate broadcasting & zip-like assignment + # can prob also fix the various if tests for sparse/categorical + if self._blklocs is None and self.ndim > 1: + self._rebuild_blknos_and_blklocs() + + # Note: we exclude DTA/TDA here + value_is_extension_type = is_1d_only_ea_dtype(value.dtype) + if not value_is_extension_type: + if value.ndim == 2: + value = value.T + else: + value = ensure_block_shape(value, ndim=2) + + if value.shape[1:] != self.shape[1:]: + raise AssertionError( + "Shape of new values must be compatible with manager shape" + ) + + if lib.is_integer(loc): + # We have 6 tests where loc is _not_ an int. + # In this case, get_blkno_placements will yield only one tuple, + # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1))) + + # Check if we can use _iset_single fastpath + loc = cast(int, loc) + blkno = self.blknos[loc] + blk = self.blocks[blkno] + if len(blk._mgr_locs) == 1: # TODO: fastest way to check this? + return self._iset_single( + loc, + value, + inplace=inplace, + blkno=blkno, + blk=blk, + refs=refs, + ) + + # error: Incompatible types in assignment (expression has type + # "List[Union[int, slice, ndarray]]", variable has type "Union[int, + # slice, ndarray]") + loc = [loc] # type: ignore[assignment] + + # categorical/sparse/datetimetz + if value_is_extension_type: + + def value_getitem(placement): + return value + + else: + + def value_getitem(placement): + return value[placement.indexer] + + # Accessing public blknos ensures the public versions are initialized + blknos = self.blknos[loc] + blklocs = self.blklocs[loc].copy() + + unfit_mgr_locs = [] + unfit_val_locs = [] + removed_blknos = [] + for blkno_l, val_locs in libinternals.get_blkno_placements(blknos, group=True): + blk = self.blocks[blkno_l] + blk_locs = blklocs[val_locs.indexer] + if inplace and blk.should_store(value): + # Updating inplace -> check if we need to do Copy-on-Write + if using_copy_on_write() and not self._has_no_reference_block(blkno_l): + self._iset_split_block( + blkno_l, blk_locs, value_getitem(val_locs), refs=refs + ) + else: + blk.set_inplace(blk_locs, value_getitem(val_locs)) + continue + else: + unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) + unfit_val_locs.append(val_locs) + + # If all block items are unfit, schedule the block for removal. 
+ if len(val_locs) == len(blk.mgr_locs): + removed_blknos.append(blkno_l) + continue + else: + # Defer setting the new values to enable consolidation + self._iset_split_block(blkno_l, blk_locs, refs=refs) + + if len(removed_blknos): + # Remove blocks & update blknos accordingly + is_deleted = np.zeros(self.nblocks, dtype=np.bool_) + is_deleted[removed_blknos] = True + + new_blknos = np.empty(self.nblocks, dtype=np.intp) + new_blknos.fill(-1) + new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) + self._blknos = new_blknos[self._blknos] + self.blocks = tuple( + blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos) + ) + + if unfit_val_locs: + unfit_idxr = np.concatenate(unfit_mgr_locs) + unfit_count = len(unfit_idxr) + + new_blocks: list[Block] = [] + if value_is_extension_type: + # This code (ab-)uses the fact that EA blocks contain only + # one item. + # TODO(EA2D): special casing unnecessary with 2D EAs + new_blocks.extend( + new_block_2d( + values=value, + placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)), + refs=refs, + ) + for mgr_loc in unfit_idxr + ) + + self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks) + self._blklocs[unfit_idxr] = 0 + + else: + # unfit_val_locs contains BlockPlacement objects + unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) + + new_blocks.append( + new_block_2d( + values=value_getitem(unfit_val_items), + placement=BlockPlacement(unfit_idxr), + refs=refs, + ) + ) + + self._blknos[unfit_idxr] = len(self.blocks) + self._blklocs[unfit_idxr] = np.arange(unfit_count) + + self.blocks += tuple(new_blocks) + + # Newly created block's dtype may already be present. + self._known_consolidated = False + + def _iset_split_block( + self, + blkno_l: int, + blk_locs: np.ndarray | list[int], + value: ArrayLike | None = None, + refs: BlockValuesRefs | None = None, + ) -> None: + """Removes columns from a block by splitting the block. + + Avoids copying the whole block through slicing and updates the manager + after determinint the new block structure. Optionally adds a new block, + otherwise has to be done by the caller. + + Parameters + ---------- + blkno_l: The block number to operate on, relevant for updating the manager + blk_locs: The locations of our block that should be deleted. + value: The value to set as a replacement. + refs: The reference tracking object of the value to set. + """ + blk = self.blocks[blkno_l] + + if self._blklocs is None: + self._rebuild_blknos_and_blklocs() + + nbs_tup = tuple(blk.delete(blk_locs)) + if value is not None: + locs = blk.mgr_locs.as_array[blk_locs] + first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs) + else: + first_nb = nbs_tup[0] + nbs_tup = tuple(nbs_tup[1:]) + + nr_blocks = len(self.blocks) + blocks_tup = ( + self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup + ) + self.blocks = blocks_tup + + if not nbs_tup and value is not None: + # No need to update anything if split did not happen + return + + self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb)) + + for i, nb in enumerate(nbs_tup): + self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb)) + self._blknos[nb.mgr_locs.indexer] = i + nr_blocks + + def _iset_single( + self, + loc: int, + value: ArrayLike, + inplace: bool, + blkno: int, + blk: Block, + refs: BlockValuesRefs | None = None, + ) -> None: + """ + Fastpath for iset when we are only setting a single position and + the Block currently in that position is itself single-column. 
+ + In this case we can swap out the entire Block and blklocs and blknos + are unaffected. + """ + # Caller is responsible for verifying value.shape + + if inplace and blk.should_store(value): + copy = False + if using_copy_on_write() and not self._has_no_reference_block(blkno): + # perform Copy-on-Write and clear the reference + copy = True + iloc = self.blklocs[loc] + blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy) + return + + nb = new_block_2d(value, placement=blk._mgr_locs, refs=refs) + old_blocks = self.blocks + new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :] + self.blocks = new_blocks + return + + def column_setitem( + self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False + ) -> None: + """ + Set values ("setitem") into a single column (not setting the full column). + + This is a method on the BlockManager level, to avoid creating an + intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) + """ + needs_to_warn = False + if warn_copy_on_write() and not self._has_no_reference(loc): + if not isinstance( + self.blocks[self.blknos[loc]].values, + (ArrowExtensionArray, ArrowStringArray), + ): + # We might raise if we are in an expansion case, so defer + # warning till we actually updated + needs_to_warn = True + + elif using_copy_on_write() and not self._has_no_reference(loc): + blkno = self.blknos[loc] + # Split blocks to only copy the column we want to modify + blk_loc = self.blklocs[loc] + # Copy our values + values = self.blocks[blkno].values + if values.ndim == 1: + values = values.copy() + else: + # Use [blk_loc] as indexer to keep ndim=2, this already results in a + # copy + values = values[[blk_loc]] + self._iset_split_block(blkno, [blk_loc], values) + + # this manager is only created temporarily to mutate the values in place + # so don't track references, otherwise the `setitem` would perform CoW again + col_mgr = self.iget(loc, track_ref=False) + if inplace_only: + col_mgr.setitem_inplace(idx, value) + else: + new_mgr = col_mgr.setitem((idx,), value) + self.iset(loc, new_mgr._block.values, inplace=True) + + if needs_to_warn: + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + + def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None: + """ + Insert item at selected position. + + Parameters + ---------- + loc : int + item : hashable + value : np.ndarray or ExtensionArray + refs : The reference tracking object of the value to set. + """ + with warnings.catch_warnings(): + # TODO: re-issue this with setitem-specific message? 
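A hedged sketch of the user-facing operation that column_setitem supports: positional assignment into a single column, which recent pandas versions route through this manager method instead of building an intermediate Series (the exact dispatch may vary by version and Copy-on-Write mode).

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
df.iloc[1, 0] = 10            # set a single value in column 'a'
df.iloc[:2, 1] = [7.0, 8.0]   # set a slice within column 'b'
print(df)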
+ warnings.filterwarnings( + "ignore", + "The behavior of Index.insert with object-dtype is deprecated", + category=FutureWarning, + ) + new_axis = self.items.insert(loc, item) + + if value.ndim == 2: + value = value.T + if len(value) > 1: + raise ValueError( + f"Expected a 1D array, got an array with shape {value.T.shape}" + ) + else: + value = ensure_block_shape(value, ndim=self.ndim) + + bp = BlockPlacement(slice(loc, loc + 1)) + block = new_block_2d(values=value, placement=bp, refs=refs) + + if not len(self.blocks): + # Fastpath + self._blklocs = np.array([0], dtype=np.intp) + self._blknos = np.array([0], dtype=np.intp) + else: + self._insert_update_mgr_locs(loc) + self._insert_update_blklocs_and_blknos(loc) + + self.axes[0] = new_axis + self.blocks += (block,) + + self._known_consolidated = False + + if sum(not block.is_extension for block in self.blocks) > 100: + warnings.warn( + "DataFrame is highly fragmented. This is usually the result " + "of calling `frame.insert` many times, which has poor performance. " + "Consider joining all columns at once using pd.concat(axis=1) " + "instead. To get a de-fragmented frame, use `newframe = frame.copy()`", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + + def _insert_update_mgr_locs(self, loc) -> None: + """ + When inserting a new Block at location 'loc', we increment + all of the mgr_locs of blocks above that by one. + """ + for blkno, count in _fast_count_smallints(self.blknos[loc:]): + # .620 this way, .326 of which is in increment_above + blk = self.blocks[blkno] + blk._mgr_locs = blk._mgr_locs.increment_above(loc) + + def _insert_update_blklocs_and_blknos(self, loc) -> None: + """ + When inserting a new Block at location 'loc', we update our + _blklocs and _blknos. + """ + + # Accessing public blklocs ensures the public versions are initialized + if loc == self.blklocs.shape[0]: + # np.append is a lot faster, let's use it if we can. + self._blklocs = np.append(self._blklocs, 0) + self._blknos = np.append(self._blknos, len(self.blocks)) + elif loc == 0: + # np.append is a lot faster, let's use it if we can. + self._blklocs = np.append(self._blklocs[::-1], 0)[::-1] + self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1] + else: + new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos( + self.blklocs, self.blknos, loc, len(self.blocks) + ) + self._blklocs = new_blklocs + self._blknos = new_blknos + + def idelete(self, indexer) -> BlockManager: + """ + Delete selected locations, returning a new BlockManager. + """ + is_deleted = np.zeros(self.shape[0], dtype=np.bool_) + is_deleted[indexer] = True + taker = (~is_deleted).nonzero()[0] + + nbs = self._slice_take_blocks_ax0(taker, only_slice=True, ref_inplace_op=True) + new_columns = self.items[~is_deleted] + axes = [new_columns, self.axes[1]] + return type(self)(tuple(nbs), axes, verify_integrity=False) + + # ---------------------------------------------------------------- + # Block-wise Operation + + def grouped_reduce(self, func: Callable) -> Self: + """ + Apply grouped reduction function blockwise, returning a new BlockManager. + + Parameters + ---------- + func : grouped reduction function + + Returns + ------- + BlockManager + """ + result_blocks: list[Block] = [] + + for blk in self.blocks: + if blk.is_object: + # split on object-dtype blocks bc some columns may raise + # while others do not. 
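To make the fragmentation guidance in the warning above concrete, a small sketch of the recommended pattern: build the new columns in one concat instead of many inserts, and use copy() to de-fragment an already-fragmented frame.

import numpy as np
import pandas as pd

base = pd.DataFrame({"a": np.arange(5)})
new_cols = pd.DataFrame({f"x{i}": np.arange(5.0) for i in range(150)})
df = pd.concat([base, new_cols], axis=1)   # one step, no repeated insert
df = df.copy()                             # returns a de-fragmented frame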
+ for sb in blk._split(): + applied = sb.apply(func) + result_blocks = extend_blocks(applied, result_blocks) + else: + applied = blk.apply(func) + result_blocks = extend_blocks(applied, result_blocks) + + if len(result_blocks) == 0: + nrows = 0 + else: + nrows = result_blocks[0].values.shape[-1] + index = Index(range(nrows)) + + return type(self).from_blocks(result_blocks, [self.axes[0], index]) + + def reduce(self, func: Callable) -> Self: + """ + Apply reduction function blockwise, returning a single-row BlockManager. + + Parameters + ---------- + func : reduction function + + Returns + ------- + BlockManager + """ + # If 2D, we assume that we're operating column-wise + assert self.ndim == 2 + + res_blocks: list[Block] = [] + for blk in self.blocks: + nbs = blk.reduce(func) + res_blocks.extend(nbs) + + index = Index([None]) # placeholder + new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) + return new_mgr + + def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: + """ + Apply array_op blockwise with another (aligned) BlockManager. + """ + return operate_blockwise(self, other, array_op) + + def _equal_values(self: BlockManager, other: BlockManager) -> bool: + """ + Used in .equals defined in base class. Only check the column values + assuming shape and indexes have already been checked. + """ + return blockwise_all(self, other, array_equals) + + def quantile( + self, + *, + qs: Index, # with dtype float 64 + interpolation: QuantileInterpolation = "linear", + ) -> Self: + """ + Iterate over blocks applying quantile reduction. + This routine is intended for reduction type operations and + will do inference on the generated blocks. + + Parameters + ---------- + interpolation : type of interpolation, default 'linear' + qs : list of the quantiles to be computed + + Returns + ------- + BlockManager + """ + # Series dispatches to DataFrame for quantile, which allows us to + # simplify some of the code here and in the blocks + assert self.ndim >= 2 + assert is_list_like(qs) # caller is responsible for this + + new_axes = list(self.axes) + new_axes[1] = Index(qs, dtype=np.float64) + + blocks = [ + blk.quantile(qs=qs, interpolation=interpolation) for blk in self.blocks + ] + + return type(self)(blocks, new_axes) + + # ---------------------------------------------------------------- + + def unstack(self, unstacker, fill_value) -> BlockManager: + """ + Return a BlockManager with all blocks unstacked. + + Parameters + ---------- + unstacker : reshape._Unstacker + fill_value : Any + fill_value for newly introduced missing values. 
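A brief illustration, via the public API, of the blockwise quantile reduction described above; the resulting index is the list of requested quantiles.

import pandas as pd

df = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0], "y": [10.0, 20.0, 30.0, 40.0]})
print(df.quantile([0.25, 0.5, 0.75]))   # one row per quantile, float64 index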
+ + Returns + ------- + unstacked : BlockManager + """ + new_columns = unstacker.get_new_columns(self.items) + new_index = unstacker.new_index + + allow_fill = not unstacker.mask_all + if allow_fill: + # calculating the full mask once and passing it to Block._unstack is + # faster than letting calculating it in each repeated call + new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) + needs_masking = new_mask2D.any(axis=0) + else: + needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool) + + new_blocks: list[Block] = [] + columns_mask: list[np.ndarray] = [] + + if len(self.items) == 0: + factor = 1 + else: + fac = len(new_columns) / len(self.items) + assert fac == int(fac) + factor = int(fac) + + for blk in self.blocks: + mgr_locs = blk.mgr_locs + new_placement = mgr_locs.tile_for_unstack(factor) + + blocks, mask = blk._unstack( + unstacker, + fill_value, + new_placement=new_placement, + needs_masking=needs_masking, + ) + + new_blocks.extend(blocks) + columns_mask.extend(mask) + + # Block._unstack should ensure this holds, + assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks) + # In turn this ensures that in the BlockManager call below + # we have len(new_columns) == sum(x.shape[0] for x in new_blocks) + # which suffices to allow us to pass verify_inegrity=False + + new_columns = new_columns[columns_mask] + + bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False) + return bm + + def to_dict(self) -> dict[str, Self]: + """ + Return a dict of str(dtype) -> BlockManager + + Returns + ------- + values : a dict of dtype -> BlockManager + """ + + bd: dict[str, list[Block]] = {} + for b in self.blocks: + bd.setdefault(str(b.dtype), []).append(b) + + # TODO(EA2D): the combine will be unnecessary with 2D EAs + return {dtype: self._combine(blocks) for dtype, blocks in bd.items()} + + def as_array( + self, + dtype: np.dtype | None = None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + """ + Convert the blockmanager data into an numpy array. + + Parameters + ---------- + dtype : np.dtype or None, default None + Data type of the return array. + copy : bool, default False + If True then guarantee that a copy is returned. A value of + False does not guarantee that the underlying data is not + copied. + na_value : object, default lib.no_default + Value to be used as the missing value sentinel. 
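A hedged user-level view of as_array via DataFrame.to_numpy, including the na_value sentinel described above.

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, None], "b": [2.5, 3.5]})
arr = df.to_numpy(dtype=float, na_value=-1.0)
print(arr)   # [[ 1.   2.5]
             #  [-1.   3.5]]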
+ + Returns + ------- + arr : ndarray + """ + passed_nan = lib.is_float(na_value) and isna(na_value) + + if len(self.blocks) == 0: + arr = np.empty(self.shape, dtype=float) + return arr.transpose() + + if self.is_single_block: + blk = self.blocks[0] + + if na_value is not lib.no_default: + # We want to copy when na_value is provided to avoid + # mutating the original object + if lib.is_np_dtype(blk.dtype, "f") and passed_nan: + # We are already numpy-float and na_value=np.nan + pass + else: + copy = True + + if blk.is_extension: + # Avoid implicit conversion of extension blocks to object + + # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no + # attribute "to_numpy" + arr = blk.values.to_numpy( # type: ignore[union-attr] + dtype=dtype, + na_value=na_value, + copy=copy, + ).reshape(blk.shape) + elif not copy: + arr = np.asarray(blk.values, dtype=dtype) + else: + arr = np.array(blk.values, dtype=dtype, copy=copy) + + if using_copy_on_write() and not copy: + arr = arr.view() + arr.flags.writeable = False + else: + arr = self._interleave(dtype=dtype, na_value=na_value) + # The underlying data was copied within _interleave, so no need + # to further copy if copy=True or setting na_value + + if na_value is lib.no_default: + pass + elif arr.dtype.kind == "f" and passed_nan: + pass + else: + arr[isna(arr)] = na_value + + return arr.transpose() + + def _interleave( + self, + dtype: np.dtype | None = None, + na_value: object = lib.no_default, + ) -> np.ndarray: + """ + Return ndarray from blocks with specified item order + Items must be contained in the blocks + """ + if not dtype: + # Incompatible types in assignment (expression has type + # "Optional[Union[dtype[Any], ExtensionDtype]]", variable has + # type "Optional[dtype[Any]]") + dtype = interleaved_dtype( # type: ignore[assignment] + [blk.dtype for blk in self.blocks] + ) + + # error: Argument 1 to "ensure_np_dtype" has incompatible type + # "Optional[dtype[Any]]"; expected "Union[dtype[Any], ExtensionDtype]" + dtype = ensure_np_dtype(dtype) # type: ignore[arg-type] + result = np.empty(self.shape, dtype=dtype) + + itemmask = np.zeros(self.shape[0]) + + if dtype == np.dtype("object") and na_value is lib.no_default: + # much more performant than using to_numpy below + for blk in self.blocks: + rl = blk.mgr_locs + arr = blk.get_values(dtype) + result[rl.indexer] = arr + itemmask[rl.indexer] = 1 + return result + + for blk in self.blocks: + rl = blk.mgr_locs + if blk.is_extension: + # Avoid implicit conversion of extension blocks to object + + # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no + # attribute "to_numpy" + arr = blk.values.to_numpy( # type: ignore[union-attr] + dtype=dtype, + na_value=na_value, + ) + else: + arr = blk.get_values(dtype) + result[rl.indexer] = arr + itemmask[rl.indexer] = 1 + + if not itemmask.all(): + raise AssertionError("Some items were not contained in blocks") + + return result + + # ---------------------------------------------------------------- + # Consolidation + + def is_consolidated(self) -> bool: + """ + Return True if more than one block with the same dtype + """ + if not self._known_consolidated: + self._consolidate_check() + return self._is_consolidated + + def _consolidate_check(self) -> None: + if len(self.blocks) == 1: + # fastpath + self._is_consolidated = True + self._known_consolidated = True + return + dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate] + self._is_consolidated = len(dtypes) == len(set(dtypes)) + self._known_consolidated = True + + 
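A hedged sketch of what consolidation means in practice, again peeking at internal attributes (_mgr, nblocks) that are not public and may behave differently across versions or Copy-on-Write modes.

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": np.arange(3)})
df["b"] = np.arange(3)      # adds a second int64 block
print(df._mgr.nblocks)      # typically 2 (not yet consolidated)
df2 = df.copy()             # a deep copy consolidates same-dtype blocks
print(df2._mgr.nblocks)     # typically 1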
def _consolidate_inplace(self) -> None: + # In general, _consolidate_inplace should only be called via + # DataFrame._consolidate_inplace, otherwise we will fail to invalidate + # the DataFrame's _item_cache. The exception is for newly-created + # BlockManager objects not yet attached to a DataFrame. + if not self.is_consolidated(): + self.blocks = _consolidate(self.blocks) + self._is_consolidated = True + self._known_consolidated = True + self._rebuild_blknos_and_blklocs() + + # ---------------------------------------------------------------- + # Concatenation + + @classmethod + def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self: + """ + Concatenate uniformly-indexed BlockManagers horizontally. + """ + offset = 0 + blocks: list[Block] = [] + for mgr in mgrs: + for blk in mgr.blocks: + # We need to do getitem_block here otherwise we would be altering + # blk.mgr_locs in place, which would render it invalid. This is only + # relevant in the copy=False case. + nb = blk.slice_block_columns(slice(None)) + nb._mgr_locs = nb._mgr_locs.add(offset) + blocks.append(nb) + + offset += len(mgr.items) + + new_mgr = cls(tuple(blocks), axes) + return new_mgr + + @classmethod + def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self: + """ + Concatenate uniformly-indexed BlockManagers vertically. + """ + raise NotImplementedError("This logic lives (for now) in internals.concat") + + +class SingleBlockManager(BaseBlockManager, SingleDataManager): + """manage a single block with""" + + @property + def ndim(self) -> Literal[1]: + return 1 + + _is_consolidated = True + _known_consolidated = True + __slots__ = () + is_single_block = True + + def __init__( + self, + block: Block, + axis: Index, + verify_integrity: bool = False, + ) -> None: + # Assertions disabled for performance + # assert isinstance(block, Block), type(block) + # assert isinstance(axis, Index), type(axis) + + self.axes = [axis] + self.blocks = (block,) + + @classmethod + def from_blocks( + cls, + blocks: list[Block], + axes: list[Index], + ) -> Self: + """ + Constructor for BlockManager and SingleBlockManager with same signature. + """ + assert len(blocks) == 1 + assert len(axes) == 1 + return cls(blocks[0], axes[0], verify_integrity=False) + + @classmethod + def from_array( + cls, array: ArrayLike, index: Index, refs: BlockValuesRefs | None = None + ) -> SingleBlockManager: + """ + Constructor for if we have an array that is not yet a Block. + """ + array = maybe_coerce_values(array) + bp = BlockPlacement(slice(0, len(index))) + block = new_block(array, placement=bp, ndim=1, refs=refs) + return cls(block, index) + + def to_2d_mgr(self, columns: Index) -> BlockManager: + """ + Manager analogue of Series.to_frame + """ + blk = self.blocks[0] + arr = ensure_block_shape(blk.values, ndim=2) + bp = BlockPlacement(0) + new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs) + axes = [columns, self.axes[0]] + return BlockManager([new_blk], axes=axes, verify_integrity=False) + + def _has_no_reference(self, i: int = 0) -> bool: + """ + Check for column `i` if it has references. + (whether it references another array or is itself being referenced) + Returns True if the column has no references. 
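For orientation, a hedged look at the Series counterpart: a Series is typically backed by a SingleBlockManager holding exactly one block (again via the internal _mgr attribute, not public API).

import numpy as np
import pandas as pd

s = pd.Series(np.arange(3.0), index=["a", "b", "c"])
mgr = s._mgr                 # typically a SingleBlockManager
print(mgr.nblocks)           # 1
print(mgr.index)             # the Series index (axes[0])
print(mgr._block.values)     # the backing ndarray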
+ """ + return not self.blocks[0].refs.has_reference() + + def __getstate__(self): + block_values = [b.values for b in self.blocks] + block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] + axes_array = list(self.axes) + + extra_state = { + "0.14.1": { + "axes": axes_array, + "blocks": [ + {"values": b.values, "mgr_locs": b.mgr_locs.indexer} + for b in self.blocks + ], + } + } + + # First three elements of the state are to maintain forward + # compatibility with 0.13.1. + return axes_array, block_values, block_items, extra_state + + def __setstate__(self, state) -> None: + def unpickle_block(values, mgr_locs, ndim: int) -> Block: + # TODO(EA2D): ndim would be unnecessary with 2D EAs + # older pickles may store e.g. DatetimeIndex instead of DatetimeArray + values = extract_array(values, extract_numpy=True) + if not isinstance(mgr_locs, BlockPlacement): + mgr_locs = BlockPlacement(mgr_locs) + + values = maybe_coerce_values(values) + return new_block(values, placement=mgr_locs, ndim=ndim) + + if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]: + state = state[3]["0.14.1"] + self.axes = [ensure_index(ax) for ax in state["axes"]] + ndim = len(self.axes) + self.blocks = tuple( + unpickle_block(b["values"], b["mgr_locs"], ndim=ndim) + for b in state["blocks"] + ) + else: + raise NotImplementedError("pre-0.14.1 pickles are no longer supported") + + self._post_setstate() + + def _post_setstate(self) -> None: + pass + + @cache_readonly + def _block(self) -> Block: + return self.blocks[0] + + @property + def _blknos(self): + """compat with BlockManager""" + return None + + @property + def _blklocs(self): + """compat with BlockManager""" + return None + + def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Self: + # similar to get_slice, but not restricted to slice indexer + blk = self._block + if using_copy_on_write() and len(indexer) > 0 and indexer.all(): + return type(self)(blk.copy(deep=False), self.index) + array = blk.values[indexer] + + if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "b": + # boolean indexing always gives a copy with numpy + refs = None + else: + # TODO(CoW) in theory only need to track reference if new_array is a view + refs = blk.refs + + bp = BlockPlacement(slice(0, len(array))) + block = type(blk)(array, placement=bp, ndim=1, refs=refs) + + new_idx = self.index[indexer] + return type(self)(block, new_idx) + + def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleBlockManager: + # Assertion disabled for performance + # assert isinstance(slobj, slice), type(slobj) + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + blk = self._block + array = blk.values[slobj] + bp = BlockPlacement(slice(0, len(array))) + # TODO this method is only used in groupby SeriesSplitter at the moment, + # so passing refs is not yet covered by the tests + block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs) + new_index = self.index._getitem_slice(slobj) + return type(self)(block, new_index) + + @property + def index(self) -> Index: + return self.axes[0] + + @property + def dtype(self) -> DtypeObj: + return self._block.dtype + + def get_dtypes(self) -> npt.NDArray[np.object_]: + return np.array([self._block.dtype], dtype=object) + + def external_values(self): + """The array that Series.values returns""" + return self._block.external_values() + + def internal_values(self): + """The array that Series._values returns""" + return self._block.values + + def array_values(self) -> ExtensionArray: + 
"""The array that Series.array returns""" + return self._block.array_values + + def get_numeric_data(self) -> Self: + if self._block.is_numeric: + return self.copy(deep=False) + return self.make_empty() + + @property + def _can_hold_na(self) -> bool: + return self._block._can_hold_na + + def setitem_inplace(self, indexer, value, warn: bool = True) -> None: + """ + Set values with indexer. + + For Single[Block/Array]Manager, this backs s[indexer] = value + + This is an inplace version of `setitem()`, mutating the manager/values + in place, not returning a new Manager (and Block), and thus never changing + the dtype. + """ + using_cow = using_copy_on_write() + warn_cow = warn_copy_on_write() + if (using_cow or warn_cow) and not self._has_no_reference(0): + if using_cow: + self.blocks = (self._block.copy(),) + self._cache.clear() + elif warn_cow and warn: + warnings.warn( + COW_WARNING_SETITEM_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + + super().setitem_inplace(indexer, value) + + def idelete(self, indexer) -> SingleBlockManager: + """ + Delete single location from SingleBlockManager. + + Ensures that self.blocks doesn't become empty. + """ + nb = self._block.delete(indexer)[0] + self.blocks = (nb,) + self.axes[0] = self.axes[0].delete(indexer) + self._cache.clear() + return self + + def fast_xs(self, loc): + """ + fast path for getting a cross-section + return a view of the data + """ + raise NotImplementedError("Use series._values[loc] instead") + + def set_values(self, values: ArrayLike) -> None: + """ + Set the values of the single block in place. + + Use at your own risk! This does not check if the passed values are + valid for the current Block/SingleBlockManager (length, dtype, etc), + and this does not properly keep track of references. + """ + # NOTE(CoW) Currently this is only used for FrameColumnApply.series_generator + # which handles CoW by setting the refs manually if necessary + self.blocks[0].values = values + self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values))) + + def _equal_values(self, other: Self) -> bool: + """ + Used in .equals defined in base class. Only check the column values + assuming shape and indexes have already been checked. 
+ """ + # For SingleBlockManager (i.e.Series) + if other.ndim != 1: + return False + left = self.blocks[0].values + right = other.blocks[0].values + return array_equals(left, right) + + +# -------------------------------------------------------------------- +# Constructor Helpers + + +def create_block_manager_from_blocks( + blocks: list[Block], + axes: list[Index], + consolidate: bool = True, + verify_integrity: bool = True, +) -> BlockManager: + # If verify_integrity=False, then caller is responsible for checking + # all(x.shape[-1] == len(axes[1]) for x in blocks) + # sum(x.shape[0] for x in blocks) == len(axes[0]) + # set(x for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0]))) + # all(blk.ndim == 2 for blk in blocks) + # This allows us to safely pass verify_integrity=False + + try: + mgr = BlockManager(blocks, axes, verify_integrity=verify_integrity) + + except ValueError as err: + arrays = [blk.values for blk in blocks] + tot_items = sum(arr.shape[0] for arr in arrays) + raise_construction_error(tot_items, arrays[0].shape[1:], axes, err) + + if consolidate: + mgr._consolidate_inplace() + return mgr + + +def create_block_manager_from_column_arrays( + arrays: list[ArrayLike], + axes: list[Index], + consolidate: bool, + refs: list, +) -> BlockManager: + # Assertions disabled for performance (caller is responsible for verifying) + # assert isinstance(axes, list) + # assert all(isinstance(x, Index) for x in axes) + # assert all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) + # assert all(type(x) is not NumpyExtensionArray for x in arrays) + # assert all(x.ndim == 1 for x in arrays) + # assert all(len(x) == len(axes[1]) for x in arrays) + # assert len(arrays) == len(axes[0]) + # These last three are sufficient to allow us to safely pass + # verify_integrity=False below. + + try: + blocks = _form_blocks(arrays, consolidate, refs) + mgr = BlockManager(blocks, axes, verify_integrity=False) + except ValueError as e: + raise_construction_error(len(arrays), arrays[0].shape, axes, e) + if consolidate: + mgr._consolidate_inplace() + return mgr + + +def raise_construction_error( + tot_items: int, + block_shape: Shape, + axes: list[Index], + e: ValueError | None = None, +): + """raise a helpful message about our construction""" + passed = tuple(map(int, [tot_items] + list(block_shape))) + # Correcting the user facing error message during dataframe construction + if len(passed) <= 2: + passed = passed[::-1] + + implied = tuple(len(ax) for ax in axes) + # Correcting the user facing error message during dataframe construction + if len(implied) <= 2: + implied = implied[::-1] + + # We return the exception object instead of raising it so that we + # can raise it in the caller; mypy plays better with that + if passed == implied and e is not None: + raise e + if block_shape[0] == 0: + raise ValueError("Empty data passed with indices specified.") + raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}") + + +# ----------------------------------------------------------------------- + + +def _grouping_func(tup: tuple[int, ArrayLike]) -> tuple[int, DtypeObj]: + dtype = tup[1].dtype + + if is_1d_only_ea_dtype(dtype): + # We know these won't be consolidated, so don't need to group these. 
+ # This avoids expensive comparisons of CategoricalDtype objects + sep = id(dtype) + else: + sep = 0 + + return sep, dtype + + +def _form_blocks(arrays: list[ArrayLike], consolidate: bool, refs: list) -> list[Block]: + tuples = list(enumerate(arrays)) + + if not consolidate: + return _tuples_to_blocks_no_consolidate(tuples, refs) + + # when consolidating, we can ignore refs (either stacking always copies, + # or the EA is already copied in the calling dict_to_mgr) + + # group by dtype + grouper = itertools.groupby(tuples, _grouping_func) + + nbs: list[Block] = [] + for (_, dtype), tup_block in grouper: + block_type = get_block_type(dtype) + + if isinstance(dtype, np.dtype): + is_dtlike = dtype.kind in "mM" + + if issubclass(dtype.type, (str, bytes)): + dtype = np.dtype(object) + + values, placement = _stack_arrays(list(tup_block), dtype) + if is_dtlike: + values = ensure_wrapped_if_datetimelike(values) + blk = block_type(values, placement=BlockPlacement(placement), ndim=2) + nbs.append(blk) + + elif is_1d_only_ea_dtype(dtype): + dtype_blocks = [ + block_type(x[1], placement=BlockPlacement(x[0]), ndim=2) + for x in tup_block + ] + nbs.extend(dtype_blocks) + + else: + dtype_blocks = [ + block_type( + ensure_block_shape(x[1], 2), placement=BlockPlacement(x[0]), ndim=2 + ) + for x in tup_block + ] + nbs.extend(dtype_blocks) + return nbs + + +def _tuples_to_blocks_no_consolidate(tuples, refs) -> list[Block]: + # tuples produced within _form_blocks are of the form (placement, array) + return [ + new_block_2d( + ensure_block_shape(arr, ndim=2), placement=BlockPlacement(i), refs=ref + ) + for ((i, arr), ref) in zip(tuples, refs) + ] + + +def _stack_arrays(tuples, dtype: np.dtype): + placement, arrays = zip(*tuples) + + first = arrays[0] + shape = (len(arrays),) + first.shape + + stacked = np.empty(shape, dtype=dtype) + for i, arr in enumerate(arrays): + stacked[i] = arr + + return stacked, placement + + +def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]: + """ + Merge blocks having same dtype, exclude non-consolidating blocks + """ + # sort by _can_consolidate, dtype + gkey = lambda x: x._consolidate_key + grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) + + new_blocks: list[Block] = [] + for (_can_consolidate, dtype), group_blocks in grouper: + merged_blocks, _ = _merge_blocks( + list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate + ) + new_blocks = extend_blocks(merged_blocks, new_blocks) + return tuple(new_blocks) + + +def _merge_blocks( + blocks: list[Block], dtype: DtypeObj, can_consolidate: bool +) -> tuple[list[Block], bool]: + if len(blocks) == 1: + return blocks, False + + if can_consolidate: + # TODO: optimization potential in case all mgrs contain slices and + # combination of those slices is a slice, too. 
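# Illustrative sketch (editor-added): ``_form_blocks`` groups columns by dtype, so
# consolidatable columns of the same dtype end up stacked in a single 2D block, and
# ``_consolidate``/``_merge_blocks`` re-merge blocks that were added later.
# ``DataFrame._mgr`` is private API, inspected here purely for illustration.
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [1.5, 2.5], "d": ["x", "y"]})
for blk in df._mgr.blocks:
    print(blk.dtype, blk.mgr_locs.as_array)
# int64 [0 1]     <- "a" and "b" were stacked into one block
# float64 [2]
# object [3]

df["e"] = np.array([5.0, 6.0])     # inserted as its own float64 block
print(df._mgr.nblocks)             # 4
df._mgr._consolidate_inplace()     # same-dtype blocks are merged back together
print(df._mgr.nblocks)             # 3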
+ new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) + + new_values: ArrayLike + + if isinstance(blocks[0].dtype, np.dtype): + # error: List comprehension has incompatible type List[Union[ndarray, + # ExtensionArray]]; expected List[Union[complex, generic, + # Sequence[Union[int, float, complex, str, bytes, generic]], + # Sequence[Sequence[Any]], SupportsArray]] + new_values = np.vstack([b.values for b in blocks]) # type: ignore[misc] + else: + bvals = [blk.values for blk in blocks] + bvals2 = cast(Sequence[NDArrayBackedExtensionArray], bvals) + new_values = bvals2[0]._concat_same_type(bvals2, axis=0) + + argsort = np.argsort(new_mgr_locs) + new_values = new_values[argsort] + new_mgr_locs = new_mgr_locs[argsort] + + bp = BlockPlacement(new_mgr_locs) + return [new_block_2d(new_values, placement=bp)], True + + # can't consolidate --> no merge + return blocks, False + + +def _fast_count_smallints(arr: npt.NDArray[np.intp]): + """Faster version of set(arr) for sequences of small numbers.""" + counts = np.bincount(arr) + nz = counts.nonzero()[0] + # Note: list(zip(...) outperforms list(np.c_[nz, counts[nz]]) here, + # in one benchmark by a factor of 11 + return zip(nz, counts[nz]) + + +def _preprocess_slice_or_indexer( + slice_or_indexer: slice | np.ndarray, length: int, allow_fill: bool +): + if isinstance(slice_or_indexer, slice): + return ( + "slice", + slice_or_indexer, + libinternals.slice_len(slice_or_indexer, length), + ) + else: + if ( + not isinstance(slice_or_indexer, np.ndarray) + or slice_or_indexer.dtype.kind != "i" + ): + dtype = getattr(slice_or_indexer, "dtype", None) + raise TypeError(type(slice_or_indexer), dtype) + + indexer = ensure_platform_int(slice_or_indexer) + if not allow_fill: + indexer = maybe_convert_indices(indexer, length) + return "fancy", indexer, len(indexer) + + +def make_na_array(dtype: DtypeObj, shape: Shape, fill_value) -> ArrayLike: + if isinstance(dtype, DatetimeTZDtype): + # NB: exclude e.g. 
pyarrow[dt64tz] dtypes + ts = Timestamp(fill_value).as_unit(dtype.unit) + i8values = np.full(shape, ts._value) + dt64values = i8values.view(f"M8[{dtype.unit}]") + return DatetimeArray._simple_new(dt64values, dtype=dtype) + + elif is_1d_only_ea_dtype(dtype): + dtype = cast(ExtensionDtype, dtype) + cls = dtype.construct_array_type() + + missing_arr = cls._from_sequence([], dtype=dtype) + ncols, nrows = shape + assert ncols == 1, ncols + empty_arr = -1 * np.ones((nrows,), dtype=np.intp) + return missing_arr.take(empty_arr, allow_fill=True, fill_value=fill_value) + elif isinstance(dtype, ExtensionDtype): + # TODO: no tests get here, a handful would if we disabled + # the dt64tz special-case above (which is faster) + cls = dtype.construct_array_type() + missing_arr = cls._empty(shape=shape, dtype=dtype) + missing_arr[:] = fill_value + return missing_arr + else: + # NB: we should never get here with dtype integer or bool; + # if we did, the missing_arr.fill would cast to gibberish + missing_arr = np.empty(shape, dtype=dtype) + missing_arr.fill(fill_value) + + if dtype.kind in "mM": + missing_arr = ensure_wrapped_if_datetimelike(missing_arr) + return missing_arr diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/ops.py b/venv/lib/python3.10/site-packages/pandas/core/internals/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9466c0bdf0bf4df623e2d819faf3ea7b36c878 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/internals/ops.py @@ -0,0 +1,154 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + NamedTuple, +) + +from pandas.core.dtypes.common import is_1d_only_ea_dtype + +if TYPE_CHECKING: + from collections.abc import Iterator + + from pandas._libs.internals import BlockPlacement + from pandas._typing import ArrayLike + + from pandas.core.internals.blocks import Block + from pandas.core.internals.managers import BlockManager + + +class BlockPairInfo(NamedTuple): + lvals: ArrayLike + rvals: ArrayLike + locs: BlockPlacement + left_ea: bool + right_ea: bool + rblk: Block + + +def _iter_block_pairs( + left: BlockManager, right: BlockManager +) -> Iterator[BlockPairInfo]: + # At this point we have already checked the parent DataFrames for + # assert rframe._indexed_same(lframe) + + for blk in left.blocks: + locs = blk.mgr_locs + blk_vals = blk.values + + left_ea = blk_vals.ndim == 1 + + rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True) + + # Assertions are disabled for performance, but should hold: + # if left_ea: + # assert len(locs) == 1, locs + # assert len(rblks) == 1, rblks + # assert rblks[0].shape[0] == 1, rblks[0].shape + + for rblk in rblks: + right_ea = rblk.values.ndim == 1 + + lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea) + info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk) + yield info + + +def operate_blockwise( + left: BlockManager, right: BlockManager, array_op +) -> BlockManager: + # At this point we have already checked the parent DataFrames for + # assert rframe._indexed_same(lframe) + + res_blks: list[Block] = [] + for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right): + res_values = array_op(lvals, rvals) + if ( + left_ea + and not right_ea + and hasattr(res_values, "reshape") + and not is_1d_only_ea_dtype(res_values.dtype) + ): + res_values = res_values.reshape(1, -1) + nbs = rblk._split_op_result(res_values) + + # Assertions are disabled for performance, but should hold: + # if right_ea or left_ea: + # 
assert len(nbs) == 1 + # else: + # assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape) + + _reset_block_mgr_locs(nbs, locs) + + res_blks.extend(nbs) + + # Assertions are disabled for performance, but should hold: + # slocs = {y for nb in res_blks for y in nb.mgr_locs.as_array} + # nlocs = sum(len(nb.mgr_locs.as_array) for nb in res_blks) + # assert nlocs == len(left.items), (nlocs, len(left.items)) + # assert len(slocs) == nlocs, (len(slocs), nlocs) + # assert slocs == set(range(nlocs)), slocs + + new_mgr = type(right)(tuple(res_blks), axes=right.axes, verify_integrity=False) + return new_mgr + + +def _reset_block_mgr_locs(nbs: list[Block], locs) -> None: + """ + Reset mgr_locs to correspond to our original DataFrame. + """ + for nb in nbs: + nblocs = locs[nb.mgr_locs.indexer] + nb.mgr_locs = nblocs + # Assertions are disabled for performance, but should hold: + # assert len(nblocs) == nb.shape[0], (len(nblocs), nb.shape) + # assert all(x in locs.as_array for x in nb.mgr_locs.as_array) + + +def _get_same_shape_values( + lblk: Block, rblk: Block, left_ea: bool, right_ea: bool +) -> tuple[ArrayLike, ArrayLike]: + """ + Slice lblk.values to align with rblk. Squeeze if we have EAs. + """ + lvals = lblk.values + rvals = rblk.values + + # Require that the indexing into lvals be slice-like + assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs + + # TODO(EA2D): with 2D EAs only this first clause would be needed + if not (left_ea or right_ea): + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[Union[ndarray, slice], slice]" + lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload] + assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) + elif left_ea and right_ea: + assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) + elif right_ea: + # lvals are 2D, rvals are 1D + + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[Union[ndarray, slice], slice]" + lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload] + assert lvals.shape[0] == 1, lvals.shape + lvals = lvals[0, :] + else: + # lvals are 1D, rvals are 2D + assert rvals.shape[0] == 1, rvals.shape + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[int, slice]" + rvals = rvals[0, :] # type: ignore[call-overload] + + return lvals, rvals + + +def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool: + """ + Blockwise `all` reduction. + """ + for info in _iter_block_pairs(left, right): + res = op(info.lvals, info.rvals) + if not res: + return False + return True diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae889a7fdbc24935c0884e8bbcdf56bde8946460 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/ops/__init__.py @@ -0,0 +1,93 @@ +""" +Arithmetic operations for PandasObjects + +This is not a public API. 
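# Illustrative sketch (editor-added): the block-pairing helpers in ops.py above back
# aligned frame/frame arithmetic (``operate_blockwise``, one array op per block pair)
# and ``DataFrame.equals`` (``blockwise_all`` with an array-equality op).
import pandas as pd

left = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
right = pd.DataFrame({"a": [10, 20], "b": [0.5, 0.5]})

print(left + right)               # each block of ``left`` is paired with the aligned
                                  # slice of ``right`` and the add runs per pair
print(left.equals(left.copy()))   # True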
+""" +from __future__ import annotations + +from pandas.core.ops.array_ops import ( + arithmetic_op, + comp_method_OBJECT_ARRAY, + comparison_op, + fill_binop, + get_array_op, + logical_op, + maybe_prepare_scalar_for_op, +) +from pandas.core.ops.common import ( + get_op_result_name, + unpack_zerodim_and_defer, +) +from pandas.core.ops.docstrings import make_flex_doc +from pandas.core.ops.invalid import invalid_comparison +from pandas.core.ops.mask_ops import ( + kleene_and, + kleene_or, + kleene_xor, +) +from pandas.core.roperator import ( + radd, + rand_, + rdiv, + rdivmod, + rfloordiv, + rmod, + rmul, + ror_, + rpow, + rsub, + rtruediv, + rxor, +) + +# ----------------------------------------------------------------------------- +# constants +ARITHMETIC_BINOPS: set[str] = { + "add", + "sub", + "mul", + "pow", + "mod", + "floordiv", + "truediv", + "divmod", + "radd", + "rsub", + "rmul", + "rpow", + "rmod", + "rfloordiv", + "rtruediv", + "rdivmod", +} + + +__all__ = [ + "ARITHMETIC_BINOPS", + "arithmetic_op", + "comparison_op", + "comp_method_OBJECT_ARRAY", + "invalid_comparison", + "fill_binop", + "kleene_and", + "kleene_or", + "kleene_xor", + "logical_op", + "make_flex_doc", + "radd", + "rand_", + "rdiv", + "rdivmod", + "rfloordiv", + "rmod", + "rmul", + "ror_", + "rpow", + "rsub", + "rtruediv", + "rxor", + "unpack_zerodim_and_defer", + "get_op_result_name", + "maybe_prepare_scalar_for_op", + "get_array_op", +] diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c062decb0b674153e6961e6ed78f366eee27920d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f5581bb50f1e16f8e7e6f007852bd95123a2651 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..888cdf33ab82a203c5c48c88ec81120a870b3b3c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/dispatch.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/dispatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df12639d858a0dea8dba09a9c6295e869c682884 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/dispatch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1a9e01c430042bd981f7aec892ec1b93f8453cb Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc differ diff 
--git a/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e79466bf37f0b9cea3ccd9ba83738e5e2a2c3843 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00e7906fe01eb1fa4a334e39e46954d8298ffb26 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68c4b689f674ddd5d9b010600b8de1d54bdf068e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/missing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/array_ops.py b/venv/lib/python3.10/site-packages/pandas/core/ops/array_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4b762a359d321ea61660e78ec63d392f8436939d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/ops/array_ops.py @@ -0,0 +1,604 @@ +""" +Functions for arithmetic and comparison operations on NumPy arrays and +ExtensionArrays. +""" +from __future__ import annotations + +import datetime +from functools import partial +import operator +from typing import ( + TYPE_CHECKING, + Any, +) +import warnings + +import numpy as np + +from pandas._libs import ( + NaT, + Timedelta, + Timestamp, + lib, + ops as libops, +) +from pandas._libs.tslibs import ( + BaseOffset, + get_supported_dtype, + is_supported_dtype, + is_unitless, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + construct_1d_object_array_from_listlike, + find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_list_like, + is_numeric_v_string_like, + is_object_dtype, + is_scalar, +) +from pandas.core.dtypes.generic import ( + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import roperator +from pandas.core.computation import expressions +from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.ops import missing +from pandas.core.ops.dispatch import should_extension_dispatch +from pandas.core.ops.invalid import invalid_comparison + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Shape, + ) + +# ----------------------------------------------------------------------------- +# Masking NA values and fallbacks for operations numpy does not support + + +def fill_binop(left, right, fill_value): + """ + If a non-None fill_value is given, replace null entries in left and right + with this value, but only in positions where _one_ of left/right is null, + not both. 
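# Illustrative sketch (editor-added): the ``fill_binop`` semantics described above are
# what ``Series.add(..., fill_value=...)`` exposes publicly -- positions missing in
# exactly one operand are filled before the op, positions missing in both stay NaN.
import numpy as np
import pandas as pd

a = pd.Series([1.0, np.nan, 2.0, np.nan], index=list("abcd"))
b = pd.Series([10.0, 20.0, np.nan, np.nan], index=list("abcd"))

print(a.add(b, fill_value=0))
# a    11.0
# b    20.0
# c     2.0
# d     NaN   <- missing in both inputs, so it stays missing
# dtype: float64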
+ + Parameters + ---------- + left : array-like + right : array-like + fill_value : object + + Returns + ------- + left : array-like + right : array-like + + Notes + ----- + Makes copies if fill_value is not None and NAs are present. + """ + if fill_value is not None: + left_mask = isna(left) + right_mask = isna(right) + + # one but not both + mask = left_mask ^ right_mask + + if left_mask.any(): + # Avoid making a copy if we can + left = left.copy() + left[left_mask & mask] = fill_value + + if right_mask.any(): + # Avoid making a copy if we can + right = right.copy() + right[right_mask & mask] = fill_value + + return left, right + + +def comp_method_OBJECT_ARRAY(op, x, y): + if isinstance(y, list): + # e.g. test_tuple_categories + y = construct_1d_object_array_from_listlike(y) + + if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): + if not is_object_dtype(y.dtype): + y = y.astype(np.object_) + + if isinstance(y, (ABCSeries, ABCIndex)): + y = y._values + + if x.shape != y.shape: + raise ValueError("Shapes must match", x.shape, y.shape) + result = libops.vec_compare(x.ravel(), y.ravel(), op) + else: + result = libops.scalar_compare(x.ravel(), y, op) + return result.reshape(x.shape) + + +def _masked_arith_op(x: np.ndarray, y, op): + """ + If the given arithmetic operation fails, attempt it again on + only the non-null elements of the input array(s). + + Parameters + ---------- + x : np.ndarray + y : np.ndarray, Series, Index + op : binary operator + """ + # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes + # the logic valid for both Series and DataFrame ops. + xrav = x.ravel() + + if isinstance(y, np.ndarray): + dtype = find_common_type([x.dtype, y.dtype]) + result = np.empty(x.size, dtype=dtype) + + if len(x) != len(y): + raise ValueError(x.shape, y.shape) + ymask = notna(y) + + # NB: ravel() is only safe since y is ndarray; for e.g. PeriodIndex + # we would get int64 dtype, see GH#19956 + yrav = y.ravel() + mask = notna(xrav) & ymask.ravel() + + # See GH#5284, GH#5035, GH#19448 for historical reference + if mask.any(): + result[mask] = op(xrav[mask], yrav[mask]) + + else: + if not is_scalar(y): + raise TypeError( + f"Cannot broadcast np.ndarray with operand of type { type(y) }" + ) + + # mask is only meaningful for x + result = np.empty(x.size, dtype=x.dtype) + mask = notna(xrav) + + # 1 ** np.nan is 1. So we have to unmask those. + if op is pow: + mask = np.where(x == 1, False, mask) + elif op is roperator.rpow: + mask = np.where(y == 1, False, mask) + + if mask.any(): + result[mask] = op(xrav[mask], y) + + np.putmask(result, ~mask, np.nan) + result = result.reshape(x.shape) # 2D compat + return result + + +def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False): + """ + Return the result of evaluating op on the passed in values. + + If native types are not compatible, try coercion to object dtype. + + Parameters + ---------- + left : np.ndarray + right : np.ndarray or scalar + Excludes DataFrame, Series, Index, ExtensionArray. + is_cmp : bool, default False + If this a comparison operation. 
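# Illustrative sketch (editor-added): when the vectorised op raises TypeError (here
# because of the ``None`` entry in an object-dtype array), ``_masked_arith_op`` above
# re-applies the operator to the non-missing positions only and fills NaN elsewhere.
import pandas as pd

s = pd.Series([1, None, 3], dtype=object)
print(s + 1)
# 0      2
# 1    NaN
# 2      4
# dtype: object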
+ + Returns + ------- + array-like + + Raises + ------ + TypeError : invalid operation + """ + if isinstance(right, str): + # can never use numexpr + func = op + else: + func = partial(expressions.evaluate, op) + + try: + result = func(left, right) + except TypeError: + if not is_cmp and ( + left.dtype == object or getattr(right, "dtype", None) == object + ): + # For object dtype, fallback to a masked operation (only operating + # on the non-missing values) + # Don't do this for comparisons, as that will handle complex numbers + # incorrectly, see GH#32047 + result = _masked_arith_op(left, right, op) + else: + raise + + if is_cmp and (is_scalar(result) or result is NotImplemented): + # numpy returned a scalar instead of operating element-wise + # e.g. numeric array vs str + # TODO: can remove this after dropping some future numpy version? + return invalid_comparison(left, right, op) + + return missing.dispatch_fill_zeros(op, left, right, result) + + +def arithmetic_op(left: ArrayLike, right: Any, op): + """ + Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ... + + Note: the caller is responsible for ensuring that numpy warnings are + suppressed (with np.errstate(all="ignore")) if needed. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame or Index. Series is *not* excluded. + op : {operator.add, operator.sub, ...} + Or one of the reversed variants from roperator. + + Returns + ------- + ndarray or ExtensionArray + Or a 2-tuple of these in the case of divmod or rdivmod. + """ + # NB: We assume that extract_array and ensure_wrapped_if_datetimelike + # have already been called on `left` and `right`, + # and `maybe_prepare_scalar_for_op` has already been called on `right` + # We need to special-case datetime64/timedelta64 dtypes (e.g. because numpy + # casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390) + + if ( + should_extension_dispatch(left, right) + or isinstance(right, (Timedelta, BaseOffset, Timestamp)) + or right is NaT + ): + # Timedelta/Timestamp and other custom scalars are included in the check + # because numexpr will fail on it, see GH#31457 + res_values = op(left, right) + else: + # TODO we should handle EAs consistently and move this check before the if/else + # (https://github.com/pandas-dev/pandas/issues/41165) + # error: Argument 2 to "_bool_arith_check" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" + _bool_arith_check(op, left, right) # type: ignore[arg-type] + + # error: Argument 1 to "_na_arithmetic_op" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" + res_values = _na_arithmetic_op(left, right, op) # type: ignore[arg-type] + + return res_values + + +def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: + """ + Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`. + + Note: the caller is responsible for ensuring that numpy warnings are + suppressed (with np.errstate(all="ignore")) if needed. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame, Series, or Index. 
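# Illustrative sketch (editor-added): for mismatched numeric-vs-string comparisons the
# code below routes through ``invalid_comparison`` -- equality returns all-False,
# inequality all-True, and ordering comparisons raise.
import pandas as pd

s = pd.Series([1, 2, 3])
print(s == "a")        # [False, False, False]
print(s != "a")        # [True, True, True]
try:
    s < "a"
except TypeError as err:
    print(err)         # Invalid comparison between dtype=int64 and str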
+ op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le} + + Returns + ------- + ndarray or ExtensionArray + """ + # NB: We assume extract_array has already been called on left and right + lvalues = ensure_wrapped_if_datetimelike(left) + rvalues = ensure_wrapped_if_datetimelike(right) + + rvalues = lib.item_from_zerodim(rvalues) + if isinstance(rvalues, list): + # We don't catch tuple here bc we may be comparing e.g. MultiIndex + # to a tuple that represents a single entry, see test_compare_tuple_strs + rvalues = np.asarray(rvalues) + + if isinstance(rvalues, (np.ndarray, ABCExtensionArray)): + # TODO: make this treatment consistent across ops and classes. + # We are not catching all listlikes here (e.g. frozenset, tuple) + # The ambiguous case is object-dtype. See GH#27803 + if len(lvalues) != len(rvalues): + raise ValueError( + "Lengths must match to compare", lvalues.shape, rvalues.shape + ) + + if should_extension_dispatch(lvalues, rvalues) or ( + (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT) + and lvalues.dtype != object + ): + # Call the method on lvalues + res_values = op(lvalues, rvalues) + + elif is_scalar(rvalues) and isna(rvalues): # TODO: but not pd.NA? + # numpy does not like comparisons vs None + if op is operator.ne: + res_values = np.ones(lvalues.shape, dtype=bool) + else: + res_values = np.zeros(lvalues.shape, dtype=bool) + + elif is_numeric_v_string_like(lvalues, rvalues): + # GH#36377 going through the numexpr path would incorrectly raise + return invalid_comparison(lvalues, rvalues, op) + + elif lvalues.dtype == object or isinstance(rvalues, str): + res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues) + + else: + res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True) + + return res_values + + +def na_logical_op(x: np.ndarray, y, op): + try: + # For exposition, write: + # yarr = isinstance(y, np.ndarray) + # yint = is_integer(y) or (yarr and y.dtype.kind == "i") + # ybool = is_bool(y) or (yarr and y.dtype.kind == "b") + # xint = x.dtype.kind == "i" + # xbool = x.dtype.kind == "b" + # Then Cases where this goes through without raising include: + # (xint or xbool) and (yint or bool) + result = op(x, y) + except TypeError: + if isinstance(y, np.ndarray): + # bool-bool dtype operations should be OK, should not get here + assert not (x.dtype.kind == "b" and y.dtype.kind == "b") + x = ensure_object(x) + y = ensure_object(y) + result = libops.vec_binop(x.ravel(), y.ravel(), op) + else: + # let null fall thru + assert lib.is_scalar(y) + if not isna(y): + y = bool(y) + try: + result = libops.scalar_binop(x, y, op) + except ( + TypeError, + ValueError, + AttributeError, + OverflowError, + NotImplementedError, + ) as err: + typ = type(y).__name__ + raise TypeError( + f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array " + f"and scalar of type [{typ}]" + ) from err + + return result.reshape(x.shape) + + +def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike: + """ + Evaluate a logical operation `|`, `&`, or `^`. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame, Series, or Index. + op : {operator.and_, operator.or_, operator.xor} + Or one of the reversed variants from roperator. 
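# Illustrative sketch (editor-added): as the end of ``logical_op`` below notes, int/int
# keeps the bitwise integer result, while anything involving a boolean operand is
# coerced back to bool.
import pandas as pd

x = pd.Series([1, 2, 3])
y = pd.Series([1, 3, 2])
print(x & y)                                # bitwise: 1, 2, 2 (int64)

b = pd.Series([True, False, True])
print(b & pd.Series([True, True, False]))   # boolean: True, False, False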
+ + Returns + ------- + ndarray or ExtensionArray + """ + + def fill_bool(x, left=None): + # if `left` is specifically not-boolean, we do not cast to bool + if x.dtype.kind in "cfO": + # dtypes that can hold NA + mask = isna(x) + if mask.any(): + x = x.astype(object) + x[mask] = False + + if left is None or left.dtype.kind == "b": + x = x.astype(bool) + return x + + right = lib.item_from_zerodim(right) + if is_list_like(right) and not hasattr(right, "dtype"): + # e.g. list, tuple + warnings.warn( + "Logical ops (and, or, xor) between Pandas objects and dtype-less " + "sequences (e.g. list, tuple) are deprecated and will raise in a " + "future version. Wrap the object in a Series, Index, or np.array " + "before operating instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + right = construct_1d_object_array_from_listlike(right) + + # NB: We assume extract_array has already been called on left and right + lvalues = ensure_wrapped_if_datetimelike(left) + rvalues = right + + if should_extension_dispatch(lvalues, rvalues): + # Call the method on lvalues + res_values = op(lvalues, rvalues) + + else: + if isinstance(rvalues, np.ndarray): + is_other_int_dtype = rvalues.dtype.kind in "iu" + if not is_other_int_dtype: + rvalues = fill_bool(rvalues, lvalues) + + else: + # i.e. scalar + is_other_int_dtype = lib.is_integer(rvalues) + + res_values = na_logical_op(lvalues, rvalues, op) + + # For int vs int `^`, `|`, `&` are bitwise operators and return + # integer dtypes. Otherwise these are boolean ops + if not (left.dtype.kind in "iu" and is_other_int_dtype): + res_values = fill_bool(res_values) + + return res_values + + +def get_array_op(op): + """ + Return a binary array operation corresponding to the given operator op. + + Parameters + ---------- + op : function + Binary operator from operator or roperator module. + + Returns + ------- + functools.partial + """ + if isinstance(op, partial): + # We get here via dispatch_to_series in DataFrame case + # e.g. test_rolling_consistency_var_debiasing_factors + return op + + op_name = op.__name__.strip("_").lstrip("r") + if op_name == "arith_op": + # Reached via DataFrame._combine_frame i.e. flex methods + # e.g. test_df_add_flex_filled_mixed_dtypes + return op + + if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}: + return partial(comparison_op, op=op) + elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}: + return partial(logical_op, op=op) + elif op_name in { + "add", + "sub", + "mul", + "truediv", + "floordiv", + "mod", + "divmod", + "pow", + }: + return partial(arithmetic_op, op=op) + else: + raise NotImplementedError(op_name) + + +def maybe_prepare_scalar_for_op(obj, shape: Shape): + """ + Cast non-pandas objects to pandas types to unify behavior of arithmetic + and comparison operations. + + Parameters + ---------- + obj: object + shape : tuple[int] + + Returns + ------- + out : object + + Notes + ----- + Be careful to call this *after* determining the `name` attribute to be + attached to the result of the arithmetic operation. 
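# Illustrative sketch (editor-added): the scalar preparation described above upcasts
# stdlib datetime scalars to their pandas equivalents and unwraps NumPy numeric
# scalars so they behave like plain Python numbers.
import datetime
import numpy as np
import pandas as pd

ser = pd.Series(pd.to_datetime(["2024-01-01", "2024-01-02"]))
print(ser + datetime.timedelta(days=1))          # wrapped in pd.Timedelta first
# 0   2024-01-02
# 1   2024-01-03
# dtype: datetime64[ns]

print(pd.Series([1.0, 2.0]) * np.float64(2.0))   # np.float64 acts like a Python float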
+ """ + if type(obj) is datetime.timedelta: + # GH#22390 cast up to Timedelta to rely on Timedelta + # implementation; otherwise operation against numeric-dtype + # raises TypeError + return Timedelta(obj) + elif type(obj) is datetime.datetime: + # cast up to Timestamp to rely on Timestamp implementation, see Timedelta above + return Timestamp(obj) + elif isinstance(obj, np.datetime64): + # GH#28080 numpy casts integer-dtype to datetime64 when doing + # array[int] + datetime64, which we do not allow + if isna(obj): + from pandas.core.arrays import DatetimeArray + + # Avoid possible ambiguities with pd.NaT + # GH 52295 + if is_unitless(obj.dtype): + obj = obj.astype("datetime64[ns]") + elif not is_supported_dtype(obj.dtype): + new_dtype = get_supported_dtype(obj.dtype) + obj = obj.astype(new_dtype) + right = np.broadcast_to(obj, shape) + return DatetimeArray._simple_new(right, dtype=right.dtype) + + return Timestamp(obj) + + elif isinstance(obj, np.timedelta64): + if isna(obj): + from pandas.core.arrays import TimedeltaArray + + # wrapping timedelta64("NaT") in Timedelta returns NaT, + # which would incorrectly be treated as a datetime-NaT, so + # we broadcast and wrap in a TimedeltaArray + # GH 52295 + if is_unitless(obj.dtype): + obj = obj.astype("timedelta64[ns]") + elif not is_supported_dtype(obj.dtype): + new_dtype = get_supported_dtype(obj.dtype) + obj = obj.astype(new_dtype) + right = np.broadcast_to(obj, shape) + return TimedeltaArray._simple_new(right, dtype=right.dtype) + + # In particular non-nanosecond timedelta64 needs to be cast to + # nanoseconds, or else we get undesired behavior like + # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') + return Timedelta(obj) + + # We want NumPy numeric scalars to behave like Python scalars + # post NEP 50 + elif isinstance(obj, np.integer): + return int(obj) + + elif isinstance(obj, np.floating): + return float(obj) + + return obj + + +_BOOL_OP_NOT_ALLOWED = { + operator.truediv, + roperator.rtruediv, + operator.floordiv, + roperator.rfloordiv, + operator.pow, + roperator.rpow, +} + + +def _bool_arith_check(op, a: np.ndarray, b): + """ + In contrast to numpy, pandas raises an error for certain operations + with booleans. + """ + if op in _BOOL_OP_NOT_ALLOWED: + if a.dtype.kind == "b" and (is_bool_dtype(b) or lib.is_bool(b)): + op_name = op.__name__.strip("_").lstrip("r") + raise NotImplementedError( + f"operator '{op_name}' not implemented for bool dtypes" + ) diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/common.py b/venv/lib/python3.10/site-packages/pandas/core/ops/common.py new file mode 100644 index 0000000000000000000000000000000000000000..559977bacf881552d546e7704d4cf4b12b4a32fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/ops/common.py @@ -0,0 +1,146 @@ +""" +Boilerplate functions used in defining binary operations. +""" +from __future__ import annotations + +from functools import wraps +from typing import ( + TYPE_CHECKING, + Callable, +) + +from pandas._libs.lib import item_from_zerodim +from pandas._libs.missing import is_matching_na + +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from pandas._typing import F + + +def unpack_zerodim_and_defer(name: str) -> Callable[[F], F]: + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. 
+ + Parameters + ---------- + name : str + + Returns + ------- + decorator + """ + + def wrapper(method: F) -> F: + return _unpack_zerodim_and_defer(method, name) + + return wrapper + + +def _unpack_zerodim_and_defer(method, name: str): + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. + + Ensure method returns NotImplemented when operating against "senior" + classes. Ensure zero-dimensional ndarrays are always unpacked. + + Parameters + ---------- + method : binary method + name : str + + Returns + ------- + method + """ + stripped_name = name.removeprefix("__").removesuffix("__") + is_cmp = stripped_name in {"eq", "ne", "lt", "le", "gt", "ge"} + + @wraps(method) + def new_method(self, other): + if is_cmp and isinstance(self, ABCIndex) and isinstance(other, ABCSeries): + # For comparison ops, Index does *not* defer to Series + pass + else: + prio = getattr(other, "__pandas_priority__", None) + if prio is not None: + if prio > self.__pandas_priority__: + # e.g. other is DataFrame while self is Index/Series/EA + return NotImplemented + + other = item_from_zerodim(other) + + return method(self, other) + + return new_method + + +def get_op_result_name(left, right): + """ + Find the appropriate name to pin to an operation result. This result + should always be either an Index or a Series. + + Parameters + ---------- + left : {Series, Index} + right : object + + Returns + ------- + name : object + Usually a string + """ + if isinstance(right, (ABCSeries, ABCIndex)): + name = _maybe_match_name(left, right) + else: + name = left.name + return name + + +def _maybe_match_name(a, b): + """ + Try to find a name to attach to the result of an operation between + a and b. If only one of these has a `name` attribute, return that + name. Otherwise return a consensus name if they match or None if + they have different names. + + Parameters + ---------- + a : object + b : object + + Returns + ------- + name : str or None + + See Also + -------- + pandas.core.common.consensus_name_attr + """ + a_has = hasattr(a, "name") + b_has = hasattr(b, "name") + if a_has and b_has: + try: + if a.name == b.name: + return a.name + elif is_matching_na(a.name, b.name): + # e.g. both are np.nan + return a.name + else: + return None + except TypeError: + # pd.NA + if is_matching_na(a.name, b.name): + return a.name + return None + except ValueError: + # e.g. np.int64(1) vs (np.int64(1), np.int64(2)) + return None + elif a_has: + return a.name + elif b_has: + return b.name + return None diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/dispatch.py b/venv/lib/python3.10/site-packages/pandas/core/ops/dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..a939fdd3d041e9f99dde7ea40fd7aa0572d0d9b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/ops/dispatch.py @@ -0,0 +1,30 @@ +""" +Functions for defining unary operations. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.core.dtypes.generic import ABCExtensionArray + +if TYPE_CHECKING: + from pandas._typing import ArrayLike + + +def should_extension_dispatch(left: ArrayLike, right: Any) -> bool: + """ + Identify cases where Series operation should dispatch to ExtensionArray method. 
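# Illustrative sketch (editor-added): the deferral rule in ``_unpack_zerodim_and_defer``
# above means a Series returns NotImplemented against the higher-priority DataFrame
# (so the DataFrame's reflected method wins), and 0-d ndarrays are unpacked to scalars.
import numpy as np
import pandas as pd

ser = pd.Series([10, 20], index=["a", "b"])
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
print(ser + df)     # DataFrame result via __pandas_priority__ deferral
#     a   b
# 0  11  23
# 1  12  24

print(pd.Series([1, 2]) + np.array(5))   # the 0-d array is unpacked to the scalar 5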
+ + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + + Returns + ------- + bool + """ + return isinstance(left, ABCExtensionArray) or isinstance(right, ABCExtensionArray) diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/docstrings.py b/venv/lib/python3.10/site-packages/pandas/core/ops/docstrings.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2e532536d8491af44631e52982217a04ef5b17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/ops/docstrings.py @@ -0,0 +1,772 @@ +""" +Templating for ops docstrings +""" +from __future__ import annotations + + +def make_flex_doc(op_name: str, typ: str) -> str: + """ + Make the appropriate substitutions for the given operation and class-typ + into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring + to attach to a generated method. + + Parameters + ---------- + op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} + typ : str {series, 'dataframe']} + + Returns + ------- + doc : str + """ + op_name = op_name.replace("__", "") + op_desc = _op_descriptions[op_name] + + op_desc_op = op_desc["op"] + assert op_desc_op is not None # for mypy + if op_name.startswith("r"): + equiv = f"other {op_desc_op} {typ}" + elif op_name == "divmod": + equiv = f"{op_name}({typ}, other)" + else: + equiv = f"{typ} {op_desc_op} other" + + if typ == "series": + base_doc = _flex_doc_SERIES + if op_desc["reverse"]: + base_doc += _see_also_reverse_SERIES.format( + reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"] + ) + doc_no_examples = base_doc.format( + desc=op_desc["desc"], + op_name=op_name, + equiv=equiv, + series_returns=op_desc["series_returns"], + ) + ser_example = op_desc["series_examples"] + if ser_example: + doc = doc_no_examples + ser_example + else: + doc = doc_no_examples + elif typ == "dataframe": + if op_name in ["eq", "ne", "le", "lt", "ge", "gt"]: + base_doc = _flex_comp_doc_FRAME + doc = _flex_comp_doc_FRAME.format( + op_name=op_name, + desc=op_desc["desc"], + ) + else: + base_doc = _flex_doc_FRAME + doc = base_doc.format( + desc=op_desc["desc"], + op_name=op_name, + equiv=equiv, + reverse=op_desc["reverse"], + ) + else: + raise AssertionError("Invalid typ argument.") + return doc + + +_common_examples_algebra_SERIES = """ +Examples +-------- +>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) +>>> a +a 1.0 +b 1.0 +c 1.0 +d NaN +dtype: float64 +>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) +>>> b +a 1.0 +b NaN +d 1.0 +e NaN +dtype: float64""" + +_common_examples_comparison_SERIES = """ +Examples +-------- +>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e']) +>>> a +a 1.0 +b 1.0 +c 1.0 +d NaN +e 1.0 +dtype: float64 +>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f']) +>>> b +a 0.0 +b 1.0 +c 2.0 +d NaN +f 1.0 +dtype: float64""" + +_add_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.add(b, fill_value=0) +a 2.0 +b 1.0 +c 1.0 +d 1.0 +e NaN +dtype: float64 +""" +) + +_sub_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.subtract(b, fill_value=0) +a 0.0 +b 1.0 +c 1.0 +d -1.0 +e NaN +dtype: float64 +""" +) + +_mul_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.multiply(b, fill_value=0) +a 1.0 +b 0.0 +c 0.0 +d 0.0 +e NaN +dtype: float64 +""" +) + +_div_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.divide(b, fill_value=0) +a 1.0 +b inf +c inf +d 0.0 +e NaN +dtype: float64 +""" +) + 
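# Illustrative sketch (editor-added): ``make_flex_doc`` fills the templates in this
# module and the result becomes the docstring of the generated flex methods
# (``Series.add``, ``DataFrame.add``, ...).  ``pandas.core.ops`` is private API,
# imported here only for illustration.
from pandas.core.ops.docstrings import make_flex_doc

doc = make_flex_doc("add", "series")
print(doc.splitlines()[1])
# Return Addition of series and other, element-wise (binary operator `add`).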
+_floordiv_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.floordiv(b, fill_value=0) +a 1.0 +b inf +c inf +d 0.0 +e NaN +dtype: float64 +""" +) + +_divmod_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.divmod(b, fill_value=0) +(a 1.0 + b inf + c inf + d 0.0 + e NaN + dtype: float64, + a 0.0 + b NaN + c NaN + d 0.0 + e NaN + dtype: float64) +""" +) + +_mod_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.mod(b, fill_value=0) +a 0.0 +b NaN +c NaN +d 0.0 +e NaN +dtype: float64 +""" +) +_pow_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.pow(b, fill_value=0) +a 1.0 +b 1.0 +c 1.0 +d 0.0 +e NaN +dtype: float64 +""" +) + +_ne_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.ne(b, fill_value=0) +a False +b True +c True +d True +e True +dtype: bool +""" +) + +_eq_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.eq(b, fill_value=0) +a True +b False +c False +d False +e False +dtype: bool +""" +) + +_lt_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.lt(b, fill_value=0) +a False +b False +c True +d False +e False +f True +dtype: bool +""" +) + +_le_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.le(b, fill_value=0) +a False +b True +c True +d False +e False +f True +dtype: bool +""" +) + +_gt_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.gt(b, fill_value=0) +a True +b False +c False +d False +e True +f False +dtype: bool +""" +) + +_ge_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.ge(b, fill_value=0) +a True +b True +c False +d False +e True +f False +dtype: bool +""" +) + +_returns_series = """Series\n The result of the operation.""" + +_returns_tuple = """2-Tuple of Series\n The result of the operation.""" + +_op_descriptions: dict[str, dict[str, str | None]] = { + # Arithmetic Operators + "add": { + "op": "+", + "desc": "Addition", + "reverse": "radd", + "series_examples": _add_example_SERIES, + "series_returns": _returns_series, + }, + "sub": { + "op": "-", + "desc": "Subtraction", + "reverse": "rsub", + "series_examples": _sub_example_SERIES, + "series_returns": _returns_series, + }, + "mul": { + "op": "*", + "desc": "Multiplication", + "reverse": "rmul", + "series_examples": _mul_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "mod": { + "op": "%", + "desc": "Modulo", + "reverse": "rmod", + "series_examples": _mod_example_SERIES, + "series_returns": _returns_series, + }, + "pow": { + "op": "**", + "desc": "Exponential power", + "reverse": "rpow", + "series_examples": _pow_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "truediv": { + "op": "/", + "desc": "Floating division", + "reverse": "rtruediv", + "series_examples": _div_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "floordiv": { + "op": "//", + "desc": "Integer division", + "reverse": "rfloordiv", + "series_examples": _floordiv_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "divmod": { + "op": "divmod", + "desc": "Integer division and modulo", + "reverse": "rdivmod", + "series_examples": _divmod_example_SERIES, + "series_returns": _returns_tuple, + "df_examples": None, + }, + # Comparison Operators + "eq": { + "op": "==", + "desc": "Equal to", + "reverse": None, + "series_examples": _eq_example_SERIES, + "series_returns": _returns_series, + }, + "ne": { + "op": "!=", + "desc": "Not equal 
to", + "reverse": None, + "series_examples": _ne_example_SERIES, + "series_returns": _returns_series, + }, + "lt": { + "op": "<", + "desc": "Less than", + "reverse": None, + "series_examples": _lt_example_SERIES, + "series_returns": _returns_series, + }, + "le": { + "op": "<=", + "desc": "Less than or equal to", + "reverse": None, + "series_examples": _le_example_SERIES, + "series_returns": _returns_series, + }, + "gt": { + "op": ">", + "desc": "Greater than", + "reverse": None, + "series_examples": _gt_example_SERIES, + "series_returns": _returns_series, + }, + "ge": { + "op": ">=", + "desc": "Greater than or equal to", + "reverse": None, + "series_examples": _ge_example_SERIES, + "series_returns": _returns_series, + }, +} + +_py_num_ref = """see + `Python documentation + `_ + for more details""" +_op_names = list(_op_descriptions.keys()) +for key in _op_names: + reverse_op = _op_descriptions[key]["reverse"] + if reverse_op is not None: + _op_descriptions[reverse_op] = _op_descriptions[key].copy() + _op_descriptions[reverse_op]["reverse"] = key + _op_descriptions[key][ + "see_also_desc" + ] = f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}" + _op_descriptions[reverse_op][ + "see_also_desc" + ] = f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}" + +_flex_doc_SERIES = """ +Return {desc} of series and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value for +missing data in either one of the inputs. + +Parameters +---------- +other : Series or scalar value +level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level. +fill_value : None or float value, default None (NaN) + Fill existing missing (NaN) values, and any new element needed for + successful Series alignment, with this value before computation. + If data in both corresponding Series locations is missing + the result of filling (at that location) will be missing. +axis : {{0 or 'index'}} + Unused. Parameter needed for compatibility with DataFrame. + +Returns +------- +{series_returns} +""" + +_see_also_reverse_SERIES = """ +See Also +-------- +Series.{reverse} : {see_also_desc}. +""" + +_flex_doc_FRAME = """ +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value +for missing data in one of the inputs. With reverse version, `{reverse}`. + +Among flexible wrappers (`add`, `sub`, `mul`, `div`, `floordiv`, `mod`, `pow`) to +arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`. + +Parameters +---------- +other : scalar, sequence, Series, dict or DataFrame + Any single or multiple element data structure, or list-like object. +axis : {{0 or 'index', 1 or 'columns'}} + Whether to compare by the index (0 or 'index') or columns. + (1 or 'columns'). For Series input, axis to match Series index on. +level : int or label + Broadcast across a level, matching Index values on the + passed MultiIndex level. +fill_value : float or None, default None + Fill existing missing (NaN) values, and any new element needed for + successful DataFrame alignment, with this value before computation. + If data in both corresponding DataFrame locations is missing + the result will be missing. + +Returns +------- +DataFrame + Result of the arithmetic operation. + +See Also +-------- +DataFrame.add : Add DataFrames. +DataFrame.sub : Subtract DataFrames. +DataFrame.mul : Multiply DataFrames. 
+DataFrame.div : Divide DataFrames (float division). +DataFrame.truediv : Divide DataFrames (float division). +DataFrame.floordiv : Divide DataFrames (integer division). +DataFrame.mod : Calculate modulo (remainder after division). +DataFrame.pow : Calculate exponential power. + +Notes +----- +Mismatched indices will be unioned together. + +Examples +-------- +>>> df = pd.DataFrame({{'angles': [0, 3, 4], +... 'degrees': [360, 180, 360]}}, +... index=['circle', 'triangle', 'rectangle']) +>>> df + angles degrees +circle 0 360 +triangle 3 180 +rectangle 4 360 + +Add a scalar with operator version which return the same +results. + +>>> df + 1 + angles degrees +circle 1 361 +triangle 4 181 +rectangle 5 361 + +>>> df.add(1) + angles degrees +circle 1 361 +triangle 4 181 +rectangle 5 361 + +Divide by constant with reverse version. + +>>> df.div(10) + angles degrees +circle 0.0 36.0 +triangle 0.3 18.0 +rectangle 0.4 36.0 + +>>> df.rdiv(10) + angles degrees +circle inf 0.027778 +triangle 3.333333 0.055556 +rectangle 2.500000 0.027778 + +Subtract a list and Series by axis with operator version. + +>>> df - [1, 2] + angles degrees +circle -1 358 +triangle 2 178 +rectangle 3 358 + +>>> df.sub([1, 2], axis='columns') + angles degrees +circle -1 358 +triangle 2 178 +rectangle 3 358 + +>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']), +... axis='index') + angles degrees +circle -1 359 +triangle 2 179 +rectangle 3 359 + +Multiply a dictionary by axis. + +>>> df.mul({{'angles': 0, 'degrees': 2}}) + angles degrees +circle 0 720 +triangle 0 360 +rectangle 0 720 + +>>> df.mul({{'circle': 0, 'triangle': 2, 'rectangle': 3}}, axis='index') + angles degrees +circle 0 0 +triangle 6 360 +rectangle 12 1080 + +Multiply a DataFrame of different shape with operator version. + +>>> other = pd.DataFrame({{'angles': [0, 3, 4]}}, +... index=['circle', 'triangle', 'rectangle']) +>>> other + angles +circle 0 +triangle 3 +rectangle 4 + +>>> df * other + angles degrees +circle 0 NaN +triangle 9 NaN +rectangle 16 NaN + +>>> df.mul(other, fill_value=0) + angles degrees +circle 0 0.0 +triangle 9 0.0 +rectangle 16 0.0 + +Divide by a MultiIndex by level. + +>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6], +... 'degrees': [360, 180, 360, 360, 540, 720]}}, +... index=[['A', 'A', 'A', 'B', 'B', 'B'], +... ['circle', 'triangle', 'rectangle', +... 'square', 'pentagon', 'hexagon']]) +>>> df_multindex + angles degrees +A circle 0 360 + triangle 3 180 + rectangle 4 360 +B square 4 360 + pentagon 5 540 + hexagon 6 720 + +>>> df.div(df_multindex, level=1, fill_value=0) + angles degrees +A circle NaN 1.0 + triangle 1.0 1.0 + rectangle 1.0 1.0 +B square 0.0 0.0 + pentagon 0.0 0.0 + hexagon 0.0 0.0 +""" + +_flex_comp_doc_FRAME = """ +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). + +Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison +operators. + +Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis +(rows or columns) and level for comparison. + +Parameters +---------- +other : scalar, sequence, Series, or DataFrame + Any single or multiple element data structure, or list-like object. +axis : {{0 or 'index', 1 or 'columns'}}, default 'columns' + Whether to compare by the index (0 or 'index') or columns + (1 or 'columns'). +level : int or label + Broadcast across a level, matching Index values on the passed + MultiIndex level. + +Returns +------- +DataFrame of bool + Result of the comparison. 
+ +See Also +-------- +DataFrame.eq : Compare DataFrames for equality elementwise. +DataFrame.ne : Compare DataFrames for inequality elementwise. +DataFrame.le : Compare DataFrames for less than inequality + or equality elementwise. +DataFrame.lt : Compare DataFrames for strictly less than + inequality elementwise. +DataFrame.ge : Compare DataFrames for greater than inequality + or equality elementwise. +DataFrame.gt : Compare DataFrames for strictly greater than + inequality elementwise. + +Notes +----- +Mismatched indices will be unioned together. +`NaN` values are considered different (i.e. `NaN` != `NaN`). + +Examples +-------- +>>> df = pd.DataFrame({{'cost': [250, 150, 100], +... 'revenue': [100, 250, 300]}}, +... index=['A', 'B', 'C']) +>>> df + cost revenue +A 250 100 +B 150 250 +C 100 300 + +Comparison with a scalar, using either the operator or method: + +>>> df == 100 + cost revenue +A False True +B False False +C True False + +>>> df.eq(100) + cost revenue +A False True +B False False +C True False + +When `other` is a :class:`Series`, the columns of a DataFrame are aligned +with the index of `other` and broadcast: + +>>> df != pd.Series([100, 250], index=["cost", "revenue"]) + cost revenue +A True True +B True False +C False True + +Use the method to control the broadcast axis: + +>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index') + cost revenue +A True False +B True True +C True True +D True True + +When comparing to an arbitrary sequence, the number of columns must +match the number elements in `other`: + +>>> df == [250, 100] + cost revenue +A True True +B False False +C False False + +Use the method to control the axis: + +>>> df.eq([250, 250, 100], axis='index') + cost revenue +A True False +B False True +C True False + +Compare to a DataFrame of different shape. + +>>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}}, +... index=['A', 'B', 'C', 'D']) +>>> other + revenue +A 300 +B 250 +C 100 +D 150 + +>>> df.gt(other) + cost revenue +A False False +B False False +C False True +D False False + +Compare to a MultiIndex by level. + +>>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220], +... 'revenue': [100, 250, 300, 200, 175, 225]}}, +... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'], +... ['A', 'B', 'C', 'A', 'B', 'C']]) +>>> df_multindex + cost revenue +Q1 A 250 100 + B 150 250 + C 100 300 +Q2 A 150 200 + B 300 175 + C 220 225 + +>>> df.le(df_multindex, level=1) + cost revenue +Q1 A True True + B True True + C True True +Q2 A False True + B True False + C True False +""" diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/invalid.py b/venv/lib/python3.10/site-packages/pandas/core/ops/invalid.py new file mode 100644 index 0000000000000000000000000000000000000000..e5ae6d359ac2205b01706211382d116b29176c7a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/ops/invalid.py @@ -0,0 +1,62 @@ +""" +Templates for invalid operations. 
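# Illustrative sketch (editor-added): ``make_invalid_op`` (defined in this module below)
# builds a stub method that unconditionally raises; pandas attaches such stubs to index
# types that cannot support a given operation.  ``Dummy`` is a made-up class.
from pandas.core.ops.invalid import make_invalid_op

class Dummy:
    __add__ = make_invalid_op("__add__")

try:
    Dummy() + 1
except TypeError as err:
    print(err)   # cannot perform __add__ with this index type: Dummy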
+""" +from __future__ import annotations + +import operator +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + + +def invalid_comparison(left, right, op) -> npt.NDArray[np.bool_]: + """ + If a comparison has mismatched types and is not necessarily meaningful, + follow python3 conventions by: + + - returning all-False for equality + - returning all-True for inequality + - raising TypeError otherwise + + Parameters + ---------- + left : array-like + right : scalar, array-like + op : operator.{eq, ne, lt, le, gt} + + Raises + ------ + TypeError : on inequality comparisons + """ + if op is operator.eq: + res_values = np.zeros(left.shape, dtype=bool) + elif op is operator.ne: + res_values = np.ones(left.shape, dtype=bool) + else: + typ = type(right).__name__ + raise TypeError(f"Invalid comparison between dtype={left.dtype} and {typ}") + return res_values + + +def make_invalid_op(name: str): + """ + Return a binary method that always raises a TypeError. + + Parameters + ---------- + name : str + + Returns + ------- + invalid_op : function + """ + + def invalid_op(self, other=None): + typ = type(self).__name__ + raise TypeError(f"cannot perform {name} with this index type: {typ}") + + invalid_op.__name__ = name + return invalid_op diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py b/venv/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..adc1f63c568bf579f31b13446f4614435d443df1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py @@ -0,0 +1,189 @@ +""" +Ops for masked arrays. +""" +from __future__ import annotations + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) + + +def kleene_or( + left: bool | np.ndarray | libmissing.NAType, + right: bool | np.ndarray | libmissing.NAType, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``or`` using Kleene logic. + + Values are NA where we have ``NA | NA`` or ``NA | False``. + ``NA | True`` is considered True. + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical or, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. 
This is safe, since + # A | B == B | A + if left_mask is None: + return kleene_or(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + + raise_for_nan(right, method="or") + + if right is libmissing.NA: + result = left.copy() + else: + result = left | right + + if right_mask is not None: + # output is unknown where (False & NA), (NA & False), (NA & NA) + left_false = ~(left | left_mask) + right_false = ~(right | right_mask) + mask = ( + (left_false & right_mask) + | (right_false & left_mask) + | (left_mask & right_mask) + ) + else: + if right is True: + mask = np.zeros_like(left_mask) + elif right is libmissing.NA: + mask = (~left & ~left_mask) | left_mask + else: + # False + mask = left_mask.copy() + + return result, mask + + +def kleene_xor( + left: bool | np.ndarray | libmissing.NAType, + right: bool | np.ndarray | libmissing.NAType, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``xor`` using Kleene logic. + + This is the same as ``or``, with the following adjustments + + * True, True -> False + * True, NA -> NA + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical xor, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. This is safe, since + # A ^ B == B ^ A + if left_mask is None: + return kleene_xor(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + + raise_for_nan(right, method="xor") + if right is libmissing.NA: + result = np.zeros_like(left) + else: + result = left ^ right + + if right_mask is None: + if right is libmissing.NA: + mask = np.ones_like(left_mask) + else: + mask = left_mask.copy() + else: + mask = left_mask | right_mask + + return result, mask + + +def kleene_and( + left: bool | libmissing.NAType | np.ndarray, + right: bool | libmissing.NAType | np.ndarray, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``and`` using Kleene logic. + + Values are ``NA`` for ``NA & NA`` or ``True & NA``. + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical xor, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. 
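Likewise for the ``xor`` and ``and`` helpers defined here; an illustrative snippet via the public nullable boolean dtype (assumed to route through these functions):

import pandas as pd

a = pd.array([True, False, pd.NA], dtype="boolean")

# Kleene AND: True & NA -> NA, False & NA -> False
print(a & pd.NA)

# Kleene XOR: an NA operand always propagates
print(a ^ pd.NA)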
This is safe, since + # A & B == B & A + if left_mask is None: + return kleene_and(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + raise_for_nan(right, method="and") + + if right is libmissing.NA: + result = np.zeros_like(left) + else: + result = left & right + + if right_mask is None: + # Scalar `right` + if right is libmissing.NA: + mask = (left & ~left_mask) | left_mask + + else: + mask = left_mask.copy() + if right is False: + # unmask everything + mask[:] = False + else: + # unmask where either left or right is False + left_false = ~(left | left_mask) + right_false = ~(right | right_mask) + mask = (left_mask & ~right_false) | (right_mask & ~left_false) + + return result, mask + + +def raise_for_nan(value, method: str) -> None: + if lib.is_float(value) and np.isnan(value): + raise ValueError(f"Cannot perform logical '{method}' with floating NaN") diff --git a/venv/lib/python3.10/site-packages/pandas/core/ops/missing.py b/venv/lib/python3.10/site-packages/pandas/core/ops/missing.py new file mode 100644 index 0000000000000000000000000000000000000000..fc685935a35fceab74012912d3c3cae65b9c1818 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/ops/missing.py @@ -0,0 +1,176 @@ +""" +Missing data handling for arithmetic operations. + +In particular, pandas conventions regarding division by zero differ +from numpy in the following ways: + 1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2) + gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for + the remaining pairs + (the remaining being dtype1==dtype2==intN and dtype==dtype2==uintN). + + pandas convention is to return [-inf, nan, inf] for all dtype + combinations. + + Note: the numpy behavior described here is py3-specific. + + 2) np.array([-1, 0, 1], dtype=dtype1) % np.array([0, 0, 0], dtype=dtype2) + gives precisely the same results as the // operation. + + pandas convention is to return [nan, nan, nan] for all dtype + combinations. + + 3) divmod behavior consistent with 1) and 2). +""" +from __future__ import annotations + +import operator + +import numpy as np + +from pandas.core import roperator + + +def _fill_zeros(result: np.ndarray, x, y): + """ + If this is a reversed op, then flip x,y + + If we have an integer value (or array in y) + and we have 0's, fill them with np.nan, + return the result. + + Mask the nan's from x. + """ + if result.dtype.kind == "f": + return result + + is_variable_type = hasattr(y, "dtype") + is_scalar_type = not isinstance(y, np.ndarray) + + if not is_variable_type and not is_scalar_type: + # e.g. test_series_ops_name_retention with mod we get here with list/tuple + return result + + if is_scalar_type: + y = np.array(y) + + if y.dtype.kind in "iu": + ymask = y == 0 + if ymask.any(): + # GH#7325, mask and nans must be broadcastable + mask = ymask & ~np.isnan(result) + + # GH#9308 doing ravel on result and mask can improve putmask perf, + # but can also make unwanted copies. + result = result.astype("float64", copy=False) + + np.putmask(result, mask, np.nan) + + return result + + +def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray: + """ + Set results of 0 // 0 to np.nan, regardless of the dtypes + of the numerator or the denominator. + + Parameters + ---------- + x : ndarray + y : ndarray + result : ndarray + + Returns + ------- + ndarray + The filled result. 
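The zero-division conventions this module implements are visible from the public API; a short usage sketch (illustrative, not part of the file):

import pandas as pd

s = pd.Series([-1, 0, 1])

# Integer floor division by zero is filled with -inf / nan / inf
print(s // 0)

# Modulo by zero is NaN everywhere
print(s % 0)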
+ + Examples + -------- + >>> x = np.array([1, 0, -1], dtype=np.int64) + >>> x + array([ 1, 0, -1]) + >>> y = 0 # int 0; numpy behavior is different with float + >>> result = x // y + >>> result # raw numpy result does not fill division by zero + array([0, 0, 0]) + >>> mask_zero_div_zero(x, y, result) + array([ inf, nan, -inf]) + """ + + if not hasattr(y, "dtype"): + # e.g. scalar, tuple + y = np.array(y) + if not hasattr(x, "dtype"): + # e.g scalar, tuple + x = np.array(x) + + zmask = y == 0 + + if zmask.any(): + # Flip sign if necessary for -0.0 + zneg_mask = zmask & np.signbit(y) + zpos_mask = zmask & ~zneg_mask + + x_lt0 = x < 0 + x_gt0 = x > 0 + nan_mask = zmask & (x == 0) + neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0) + posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0) + + if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): + # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN + result = result.astype("float64", copy=False) + + result[nan_mask] = np.nan + result[posinf_mask] = np.inf + result[neginf_mask] = -np.inf + + return result + + +def dispatch_fill_zeros(op, left, right, result): + """ + Call _fill_zeros with the appropriate fill value depending on the operation, + with special logic for divmod and rdivmod. + + Parameters + ---------- + op : function (operator.add, operator.div, ...) + left : object (np.ndarray for non-reversed ops) + We have excluded ExtensionArrays here + right : object (np.ndarray for reversed ops) + We have excluded ExtensionArrays here + result : ndarray + + Returns + ------- + result : np.ndarray + + Notes + ----- + For divmod and rdivmod, the `result` parameter and returned `result` + is a 2-tuple of ndarray objects. + """ + if op is divmod: + result = ( + mask_zero_div_zero(left, right, result[0]), + _fill_zeros(result[1], left, right), + ) + elif op is roperator.rdivmod: + result = ( + mask_zero_div_zero(right, left, result[0]), + _fill_zeros(result[1], right, left), + ) + elif op is operator.floordiv: + # Note: no need to do this for truediv; in py3 numpy behaves the way + # we want. + result = mask_zero_div_zero(left, right, result) + elif op is roperator.rfloordiv: + # Note: no need to do this for rtruediv; in py3 numpy behaves the way + # we want. 
+ result = mask_zero_div_zero(right, left, result) + elif op is operator.mod: + result = _fill_zeros(result, left, right) + elif op is roperator.rmod: + result = _fill_zeros(result, right, left) + return result diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e3858362e8850fcaf743e2fda19b62beabf4539 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8852a0696e31dd17ce5151c0a2fc959afe55cb6e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/concat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/concat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb7b1ae034c5db992be1888c59fb52c6f12bbecd Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/concat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/encoding.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/encoding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16dba24e4668e1aeff4dddecaae9c218c8594660 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/encoding.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/melt.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/melt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..369968f6c1cfc1334f59d89ced5560e3738deff9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/melt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/merge.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/merge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7564d816e8dfe5d8deeee0d7efb06038a0d76526 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/merge.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/pivot.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/pivot.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be5f9f6735680dd9d9c107a957965622b802b165 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/pivot.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/reshape.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/reshape.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08a3519906b3eba75c91813c9d537e3ee7b89d6a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/reshape.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/tile.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/tile.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6a2c22a91d2314f4d3b64c008d86c6712eb3484 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/tile.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77e74b570ad4bf4c6cab65b08f3a5360260f0e89 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/reshape/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/api.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/api.py new file mode 100644 index 0000000000000000000000000000000000000000..b1884c497f0ad7000351e131ef11dadab4a7c700 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/reshape/api.py @@ -0,0 +1,41 @@ +from pandas.core.reshape.concat import concat +from pandas.core.reshape.encoding import ( + from_dummies, + get_dummies, +) +from pandas.core.reshape.melt import ( + lreshape, + melt, + wide_to_long, +) +from pandas.core.reshape.merge import ( + merge, + merge_asof, + merge_ordered, +) +from pandas.core.reshape.pivot import ( + crosstab, + pivot, + pivot_table, +) +from pandas.core.reshape.tile import ( + cut, + qcut, +) + +__all__ = [ + "concat", + "crosstab", + "cut", + "from_dummies", + "get_dummies", + "lreshape", + "melt", + "merge", + "merge_asof", + "merge_ordered", + "pivot", + "pivot_table", + "qcut", + "wide_to_long", +] diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/concat.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/concat.py new file mode 100644 index 0000000000000000000000000000000000000000..dc18bb65b35bcfa5c3789b35e7d41690923b50a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/reshape/concat.py @@ -0,0 +1,894 @@ +""" +Concat routines. 
+""" +from __future__ import annotations + +from collections import abc +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_bool, + is_iterator, +) +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import isna + +from pandas.core.arrays.categorical import ( + factorize_from_iterable, + factorize_from_iterables, +) +import pandas.core.common as com +from pandas.core.indexes.api import ( + Index, + MultiIndex, + all_indexes_same, + default_index, + ensure_index, + get_objs_combined_axis, + get_unanimous_names, +) +from pandas.core.internals import concatenate_managers + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Mapping, + ) + + from pandas._typing import ( + Axis, + AxisInt, + HashableT, + ) + + from pandas import ( + DataFrame, + Series, + ) + +# --------------------------------------------------------------------- +# Concatenate DataFrame objects + + +@overload +def concat( + objs: Iterable[DataFrame] | Mapping[HashableT, DataFrame], + *, + axis: Literal[0, "index"] = ..., + join: str = ..., + ignore_index: bool = ..., + keys: Iterable[Hashable] | None = ..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> DataFrame: + ... + + +@overload +def concat( + objs: Iterable[Series] | Mapping[HashableT, Series], + *, + axis: Literal[0, "index"] = ..., + join: str = ..., + ignore_index: bool = ..., + keys: Iterable[Hashable] | None = ..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> Series: + ... + + +@overload +def concat( + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + *, + axis: Literal[0, "index"] = ..., + join: str = ..., + ignore_index: bool = ..., + keys: Iterable[Hashable] | None = ..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> DataFrame | Series: + ... + + +@overload +def concat( + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + *, + axis: Literal[1, "columns"], + join: str = ..., + ignore_index: bool = ..., + keys: Iterable[Hashable] | None = ..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> DataFrame: + ... + + +@overload +def concat( + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + *, + axis: Axis = ..., + join: str = ..., + ignore_index: bool = ..., + keys: Iterable[Hashable] | None = ..., + levels=..., + names: list[HashableT] | None = ..., + verify_integrity: bool = ..., + sort: bool = ..., + copy: bool | None = ..., +) -> DataFrame | Series: + ... 
+ + +def concat( + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + *, + axis: Axis = 0, + join: str = "outer", + ignore_index: bool = False, + keys: Iterable[Hashable] | None = None, + levels=None, + names: list[HashableT] | None = None, + verify_integrity: bool = False, + sort: bool = False, + copy: bool | None = None, +) -> DataFrame | Series: + """ + Concatenate pandas objects along a particular axis. + + Allows optional set logic along the other axes. + + Can also add a layer of hierarchical indexing on the concatenation axis, + which may be useful if the labels are the same (or overlapping) on + the passed axis number. + + Parameters + ---------- + objs : a sequence or mapping of Series or DataFrame objects + If a mapping is passed, the sorted keys will be used as the `keys` + argument, unless it is passed, in which case the values will be + selected (see below). Any None objects will be dropped silently unless + they are all None in which case a ValueError will be raised. + axis : {0/'index', 1/'columns'}, default 0 + The axis to concatenate along. + join : {'inner', 'outer'}, default 'outer' + How to handle indexes on other axis (or axes). + ignore_index : bool, default False + If True, do not use the index values along the concatenation axis. The + resulting axis will be labeled 0, ..., n - 1. This is useful if you are + concatenating objects where the concatenation axis does not have + meaningful indexing information. Note the index values on the other + axes are still respected in the join. + keys : sequence, default None + If multiple levels passed, should contain tuples. Construct + hierarchical index using the passed keys as the outermost level. + levels : list of sequences, default None + Specific levels (unique values) to use for constructing a + MultiIndex. Otherwise they will be inferred from the keys. + names : list, default None + Names for the levels in the resulting hierarchical index. + verify_integrity : bool, default False + Check whether the new concatenated axis contains duplicates. This can + be very expensive relative to the actual data concatenation. + sort : bool, default False + Sort non-concatenation axis if it is not already aligned. One exception to + this is when the non-concatentation axis is a DatetimeIndex and join='outer' + and the axis is not already aligned. In that case, the non-concatenation + axis is always sorted lexicographically. + copy : bool, default True + If False, do not copy data unnecessarily. + + Returns + ------- + object, type of objs + When concatenating all ``Series`` along the index (axis=0), a + ``Series`` is returned. When ``objs`` contains at least one + ``DataFrame``, a ``DataFrame`` is returned. When concatenating along + the columns (axis=1), a ``DataFrame`` is returned. + + See Also + -------- + DataFrame.join : Join DataFrames using indexes. + DataFrame.merge : Merge DataFrames by indexes or columns. + + Notes + ----- + The keys, levels, and names arguments are all optional. + + A walkthrough of how this method fits in with other tools for combining + pandas objects can be found `here + `__. + + It is not recommended to build DataFrames by adding single rows in a + for loop. Build a list of rows and make a DataFrame in a single concat. + + Examples + -------- + Combine two ``Series``. 
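One behavior described in the parameters above but not shown in the doctests that follow: passing a mapping, whose keys become the outer level of the result's index (an illustrative snippet, not part of the docstring):

import pandas as pd

s1 = pd.Series(["a", "b"])
s2 = pd.Series(["c", "d"])

# A dict of objects: its keys act as the `keys` argument
print(pd.concat({"first": s1, "second": s2}))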
+ + >>> s1 = pd.Series(['a', 'b']) + >>> s2 = pd.Series(['c', 'd']) + >>> pd.concat([s1, s2]) + 0 a + 1 b + 0 c + 1 d + dtype: object + + Clear the existing index and reset it in the result + by setting the ``ignore_index`` option to ``True``. + + >>> pd.concat([s1, s2], ignore_index=True) + 0 a + 1 b + 2 c + 3 d + dtype: object + + Add a hierarchical index at the outermost level of + the data with the ``keys`` option. + + >>> pd.concat([s1, s2], keys=['s1', 's2']) + s1 0 a + 1 b + s2 0 c + 1 d + dtype: object + + Label the index keys you create with the ``names`` option. + + >>> pd.concat([s1, s2], keys=['s1', 's2'], + ... names=['Series name', 'Row ID']) + Series name Row ID + s1 0 a + 1 b + s2 0 c + 1 d + dtype: object + + Combine two ``DataFrame`` objects with identical columns. + + >>> df1 = pd.DataFrame([['a', 1], ['b', 2]], + ... columns=['letter', 'number']) + >>> df1 + letter number + 0 a 1 + 1 b 2 + >>> df2 = pd.DataFrame([['c', 3], ['d', 4]], + ... columns=['letter', 'number']) + >>> df2 + letter number + 0 c 3 + 1 d 4 + >>> pd.concat([df1, df2]) + letter number + 0 a 1 + 1 b 2 + 0 c 3 + 1 d 4 + + Combine ``DataFrame`` objects with overlapping columns + and return everything. Columns outside the intersection will + be filled with ``NaN`` values. + + >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']], + ... columns=['letter', 'number', 'animal']) + >>> df3 + letter number animal + 0 c 3 cat + 1 d 4 dog + >>> pd.concat([df1, df3], sort=False) + letter number animal + 0 a 1 NaN + 1 b 2 NaN + 0 c 3 cat + 1 d 4 dog + + Combine ``DataFrame`` objects with overlapping columns + and return only those that are shared by passing ``inner`` to + the ``join`` keyword argument. + + >>> pd.concat([df1, df3], join="inner") + letter number + 0 a 1 + 1 b 2 + 0 c 3 + 1 d 4 + + Combine ``DataFrame`` objects horizontally along the x axis by + passing in ``axis=1``. + + >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']], + ... columns=['animal', 'name']) + >>> pd.concat([df1, df4], axis=1) + letter number animal name + 0 a 1 bird polly + 1 b 2 monkey george + + Prevent the result from including duplicate index values with the + ``verify_integrity`` option. + + >>> df5 = pd.DataFrame([1], index=['a']) + >>> df5 + 0 + a 1 + >>> df6 = pd.DataFrame([2], index=['a']) + >>> df6 + 0 + a 2 + >>> pd.concat([df5, df6], verify_integrity=True) + Traceback (most recent call last): + ... + ValueError: Indexes have overlapping values: ['a'] + + Append a single row to the end of a ``DataFrame`` object. 
+ + >>> df7 = pd.DataFrame({'a': 1, 'b': 2}, index=[0]) + >>> df7 + a b + 0 1 2 + >>> new_row = pd.Series({'a': 3, 'b': 4}) + >>> new_row + a 3 + b 4 + dtype: int64 + >>> pd.concat([df7, new_row.to_frame().T], ignore_index=True) + a b + 0 1 2 + 1 3 4 + """ + if copy is None: + if using_copy_on_write(): + copy = False + else: + copy = True + elif copy and using_copy_on_write(): + copy = False + + op = _Concatenator( + objs, + axis=axis, + ignore_index=ignore_index, + join=join, + keys=keys, + levels=levels, + names=names, + verify_integrity=verify_integrity, + copy=copy, + sort=sort, + ) + + return op.get_result() + + +class _Concatenator: + """ + Orchestrates a concatenation operation for BlockManagers + """ + + sort: bool + + def __init__( + self, + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + axis: Axis = 0, + join: str = "outer", + keys: Iterable[Hashable] | None = None, + levels=None, + names: list[HashableT] | None = None, + ignore_index: bool = False, + verify_integrity: bool = False, + copy: bool = True, + sort: bool = False, + ) -> None: + if isinstance(objs, (ABCSeries, ABCDataFrame, str)): + raise TypeError( + "first argument must be an iterable of pandas " + f'objects, you passed an object of type "{type(objs).__name__}"' + ) + + if join == "outer": + self.intersect = False + elif join == "inner": + self.intersect = True + else: # pragma: no cover + raise ValueError( + "Only can inner (intersect) or outer (union) join the other axis" + ) + + if not is_bool(sort): + raise ValueError( + f"The 'sort' keyword only accepts boolean values; {sort} was passed." + ) + # Incompatible types in assignment (expression has type "Union[bool, bool_]", + # variable has type "bool") + self.sort = sort # type: ignore[assignment] + + self.ignore_index = ignore_index + self.verify_integrity = verify_integrity + self.copy = copy + + objs, keys = self._clean_keys_and_objs(objs, keys) + + # figure out what our result ndim is going to be + ndims = self._get_ndims(objs) + sample, objs = self._get_sample_object(objs, ndims, keys, names, levels) + + # Standardize axis parameter to int + if sample.ndim == 1: + from pandas import DataFrame + + axis = DataFrame._get_axis_number(axis) + self._is_frame = False + self._is_series = True + else: + axis = sample._get_axis_number(axis) + self._is_frame = True + self._is_series = False + + # Need to flip BlockManager axis in the DataFrame special case + axis = sample._get_block_manager_axis(axis) + + # if we have mixed ndims, then convert to highest ndim + # creating column numbers as needed + if len(ndims) > 1: + objs = self._sanitize_mixed_ndim(objs, sample, ignore_index, axis) + + self.objs = objs + + # note: this is the BlockManager axis (since DataFrame is transposed) + self.bm_axis = axis + self.axis = 1 - self.bm_axis if self._is_frame else 0 + self.keys = keys + self.names = names or getattr(keys, "names", None) + self.levels = levels + + def _get_ndims(self, objs: list[Series | DataFrame]) -> set[int]: + # figure out what our result ndim is going to be + ndims = set() + for obj in objs: + if not isinstance(obj, (ABCSeries, ABCDataFrame)): + msg = ( + f"cannot concatenate object of type '{type(obj)}'; " + "only Series and DataFrame objs are valid" + ) + raise TypeError(msg) + + ndims.add(obj.ndim) + return ndims + + def _clean_keys_and_objs( + self, + objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], + keys, + ) -> tuple[list[Series | DataFrame], Index | None]: + if isinstance(objs, abc.Mapping): + 
if keys is None: + keys = list(objs.keys()) + objs_list = [objs[k] for k in keys] + else: + objs_list = list(objs) + + if len(objs_list) == 0: + raise ValueError("No objects to concatenate") + + if keys is None: + objs_list = list(com.not_none(*objs_list)) + else: + # GH#1649 + clean_keys = [] + clean_objs = [] + if is_iterator(keys): + keys = list(keys) + if len(keys) != len(objs_list): + # GH#43485 + warnings.warn( + "The behavior of pd.concat with len(keys) != len(objs) is " + "deprecated. In a future version this will raise instead of " + "truncating to the smaller of the two sequences", + FutureWarning, + stacklevel=find_stack_level(), + ) + for k, v in zip(keys, objs_list): + if v is None: + continue + clean_keys.append(k) + clean_objs.append(v) + objs_list = clean_objs + + if isinstance(keys, MultiIndex): + # TODO: retain levels? + keys = type(keys).from_tuples(clean_keys, names=keys.names) + else: + name = getattr(keys, "name", None) + keys = Index(clean_keys, name=name, dtype=getattr(keys, "dtype", None)) + + if len(objs_list) == 0: + raise ValueError("All objects passed were None") + + return objs_list, keys + + def _get_sample_object( + self, + objs: list[Series | DataFrame], + ndims: set[int], + keys, + names, + levels, + ) -> tuple[Series | DataFrame, list[Series | DataFrame]]: + # get the sample + # want the highest ndim that we have, and must be non-empty + # unless all objs are empty + sample: Series | DataFrame | None = None + if len(ndims) > 1: + max_ndim = max(ndims) + for obj in objs: + if obj.ndim == max_ndim and np.sum(obj.shape): + sample = obj + break + + else: + # filter out the empties if we have not multi-index possibilities + # note to keep empty Series as it affect to result columns / name + non_empties = [obj for obj in objs if sum(obj.shape) > 0 or obj.ndim == 1] + + if len(non_empties) and ( + keys is None and names is None and levels is None and not self.intersect + ): + objs = non_empties + sample = objs[0] + + if sample is None: + sample = objs[0] + return sample, objs + + def _sanitize_mixed_ndim( + self, + objs: list[Series | DataFrame], + sample: Series | DataFrame, + ignore_index: bool, + axis: AxisInt, + ) -> list[Series | DataFrame]: + # if we have mixed ndims, then convert to highest ndim + # creating column numbers as needed + + new_objs = [] + + current_column = 0 + max_ndim = sample.ndim + for obj in objs: + ndim = obj.ndim + if ndim == max_ndim: + pass + + elif ndim != max_ndim - 1: + raise ValueError( + "cannot concatenate unaligned mixed dimensional NDFrame objects" + ) + + else: + name = getattr(obj, "name", None) + if ignore_index or name is None: + if axis == 1: + # doing a row-wise concatenation so need everything + # to line up + name = 0 + else: + # doing a column-wise concatenation so need series + # to have unique names + name = current_column + current_column += 1 + + obj = sample._constructor({name: obj}, copy=False) + + new_objs.append(obj) + + return new_objs + + def get_result(self): + cons: Callable[..., DataFrame | Series] + sample: DataFrame | Series + + # series only + if self._is_series: + sample = cast("Series", self.objs[0]) + + # stack blocks + if self.bm_axis == 0: + name = com.consensus_name_attr(self.objs) + cons = sample._constructor + + arrs = [ser._values for ser in self.objs] + + res = concat_compat(arrs, axis=0) + + new_index: Index + if self.ignore_index: + # We can avoid surprisingly-expensive _get_concat_axis + new_index = default_index(len(res)) + else: + new_index = self.new_axes[0] + + mgr = 
type(sample._mgr).from_array(res, index=new_index) + + result = sample._constructor_from_mgr(mgr, axes=mgr.axes) + result._name = name + return result.__finalize__(self, method="concat") + + # combine as columns in a frame + else: + data = dict(zip(range(len(self.objs)), self.objs)) + + # GH28330 Preserves subclassed objects through concat + cons = sample._constructor_expanddim + + index, columns = self.new_axes + df = cons(data, index=index, copy=self.copy) + df.columns = columns + return df.__finalize__(self, method="concat") + + # combine block managers + else: + sample = cast("DataFrame", self.objs[0]) + + mgrs_indexers = [] + for obj in self.objs: + indexers = {} + for ax, new_labels in enumerate(self.new_axes): + # ::-1 to convert BlockManager ax to DataFrame ax + if ax == self.bm_axis: + # Suppress reindexing on concat axis + continue + + # 1-ax to convert BlockManager axis to DataFrame axis + obj_labels = obj.axes[1 - ax] + if not new_labels.equals(obj_labels): + indexers[ax] = obj_labels.get_indexer(new_labels) + + mgrs_indexers.append((obj._mgr, indexers)) + + new_data = concatenate_managers( + mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy + ) + if not self.copy and not using_copy_on_write(): + new_data._consolidate_inplace() + + out = sample._constructor_from_mgr(new_data, axes=new_data.axes) + return out.__finalize__(self, method="concat") + + def _get_result_dim(self) -> int: + if self._is_series and self.bm_axis == 1: + return 2 + else: + return self.objs[0].ndim + + @cache_readonly + def new_axes(self) -> list[Index]: + ndim = self._get_result_dim() + return [ + self._get_concat_axis if i == self.bm_axis else self._get_comb_axis(i) + for i in range(ndim) + ] + + def _get_comb_axis(self, i: AxisInt) -> Index: + data_axis = self.objs[0]._get_block_manager_axis(i) + return get_objs_combined_axis( + self.objs, + axis=data_axis, + intersect=self.intersect, + sort=self.sort, + copy=self.copy, + ) + + @cache_readonly + def _get_concat_axis(self) -> Index: + """ + Return index to be used along concatenation axis. 
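A usage-level view of what this method produces for the Series/axis=1 case (illustrative, not part of the source): the Series names become the column index unless ``ignore_index`` requests a default RangeIndex.

import pandas as pd

s1 = pd.Series([1, 2], name="a")
s2 = pd.Series([3, 4], name="b")

# Series names become the columns of the result
print(pd.concat([s1, s2], axis=1).columns.tolist())                     # ['a', 'b']

# ignore_index swaps in a default RangeIndex instead
print(pd.concat([s1, s2], axis=1, ignore_index=True).columns.tolist())  # [0, 1]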
+ """ + if self._is_series: + if self.bm_axis == 0: + indexes = [x.index for x in self.objs] + elif self.ignore_index: + idx = default_index(len(self.objs)) + return idx + elif self.keys is None: + names: list[Hashable] = [None] * len(self.objs) + num = 0 + has_names = False + for i, x in enumerate(self.objs): + if x.ndim != 1: + raise TypeError( + f"Cannot concatenate type 'Series' with " + f"object of type '{type(x).__name__}'" + ) + if x.name is not None: + names[i] = x.name + has_names = True + else: + names[i] = num + num += 1 + if has_names: + return Index(names) + else: + return default_index(len(self.objs)) + else: + return ensure_index(self.keys).set_names(self.names) + else: + indexes = [x.axes[self.axis] for x in self.objs] + + if self.ignore_index: + idx = default_index(sum(len(i) for i in indexes)) + return idx + + if self.keys is None: + if self.levels is not None: + raise ValueError("levels supported only when keys is not None") + concat_axis = _concat_indexes(indexes) + else: + concat_axis = _make_concat_multiindex( + indexes, self.keys, self.levels, self.names + ) + + self._maybe_check_integrity(concat_axis) + + return concat_axis + + def _maybe_check_integrity(self, concat_index: Index): + if self.verify_integrity: + if not concat_index.is_unique: + overlap = concat_index[concat_index.duplicated()].unique() + raise ValueError(f"Indexes have overlapping values: {overlap}") + + +def _concat_indexes(indexes) -> Index: + return indexes[0].append(indexes[1:]) + + +def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex: + if (levels is None and isinstance(keys[0], tuple)) or ( + levels is not None and len(levels) > 1 + ): + zipped = list(zip(*keys)) + if names is None: + names = [None] * len(zipped) + + if levels is None: + _, levels = factorize_from_iterables(zipped) + else: + levels = [ensure_index(x) for x in levels] + else: + zipped = [keys] + if names is None: + names = [None] + + if levels is None: + levels = [ensure_index(keys).unique()] + else: + levels = [ensure_index(x) for x in levels] + + for level in levels: + if not level.is_unique: + raise ValueError(f"Level values not unique: {level.tolist()}") + + if not all_indexes_same(indexes) or not all(level.is_unique for level in levels): + codes_list = [] + + # things are potentially different sizes, so compute the exact codes + # for each level and pass those to MultiIndex.from_arrays + + for hlevel, level in zip(zipped, levels): + to_concat = [] + if isinstance(hlevel, Index) and hlevel.equals(level): + lens = [len(idx) for idx in indexes] + codes_list.append(np.repeat(np.arange(len(hlevel)), lens)) + else: + for key, index in zip(hlevel, indexes): + # Find matching codes, include matching nan values as equal. 
+ mask = (isna(level) & isna(key)) | (level == key) + if not mask.any(): + raise ValueError(f"Key {key} not in level {level}") + i = np.nonzero(mask)[0][0] + + to_concat.append(np.repeat(i, len(index))) + codes_list.append(np.concatenate(to_concat)) + + concat_index = _concat_indexes(indexes) + + # these go at the end + if isinstance(concat_index, MultiIndex): + levels.extend(concat_index.levels) + codes_list.extend(concat_index.codes) + else: + codes, categories = factorize_from_iterable(concat_index) + levels.append(categories) + codes_list.append(codes) + + if len(names) == len(levels): + names = list(names) + else: + # make sure that all of the passed indices have the same nlevels + if not len({idx.nlevels for idx in indexes}) == 1: + raise AssertionError( + "Cannot concat indices that do not have the same number of levels" + ) + + # also copies + names = list(names) + list(get_unanimous_names(*indexes)) + + return MultiIndex( + levels=levels, codes=codes_list, names=names, verify_integrity=False + ) + + new_index = indexes[0] + n = len(new_index) + kpieces = len(indexes) + + # also copies + new_names = list(names) + new_levels = list(levels) + + # construct codes + new_codes = [] + + # do something a bit more speedy + + for hlevel, level in zip(zipped, levels): + hlevel_index = ensure_index(hlevel) + mapped = level.get_indexer(hlevel_index) + + mask = mapped == -1 + if mask.any(): + raise ValueError( + f"Values not found in passed level: {hlevel_index[mask]!s}" + ) + + new_codes.append(np.repeat(mapped, n)) + + if isinstance(new_index, MultiIndex): + new_levels.extend(new_index.levels) + new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes]) + else: + new_levels.append(new_index.unique()) + single_codes = new_index.unique().get_indexer(new_index) + new_codes.append(np.tile(single_codes, kpieces)) + + if len(new_names) < len(new_levels): + new_names.extend(new_index.names) + + return MultiIndex( + levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False + ) diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/encoding.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..3ed67bb7b7c02509f0bf493c5116258f67dc0ef7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/reshape/encoding.py @@ -0,0 +1,570 @@ +from __future__ import annotations + +from collections import defaultdict +from collections.abc import ( + Hashable, + Iterable, +) +import itertools +from typing import ( + TYPE_CHECKING, + cast, +) + +import numpy as np + +from pandas._libs.sparse import IntIndex + +from pandas.core.dtypes.common import ( + is_integer_dtype, + is_list_like, + is_object_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtype, +) + +from pandas.core.arrays import SparseArray +from pandas.core.arrays.categorical import factorize_from_iterable +from pandas.core.arrays.string_ import StringDtype +from pandas.core.frame import DataFrame +from pandas.core.indexes.api import ( + Index, + default_index, +) +from pandas.core.series import Series + +if TYPE_CHECKING: + from pandas._typing import NpDtype + + +def get_dummies( + data, + prefix=None, + prefix_sep: str | Iterable[str] | dict[str, str] = "_", + dummy_na: bool = False, + columns=None, + sparse: bool = False, + drop_first: bool = False, + dtype: NpDtype | None = None, +) -> DataFrame: + """ + Convert categorical variable into dummy/indicator variables. 
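One option documented below but not exercised by the doctests is ``sparse=True``, which backs each dummy column with a SparseArray; a usage sketch (illustrative, not part of this module):

import pandas as pd

s = pd.Series(list("abca"))

dummies = pd.get_dummies(s, sparse=True)
print(dummies.dtypes)          # Sparse[bool, False] for every level
print(dummies.sparse.density)  # fraction of stored (True) entries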
+ + Each variable is converted in as many 0/1 variables as there are different + values. Columns in the output are each named after a value; if the input is + a DataFrame, the name of the original variable is prepended to the value. + + Parameters + ---------- + data : array-like, Series, or DataFrame + Data of which to get dummy indicators. + prefix : str, list of str, or dict of str, default None + String to append DataFrame column names. + Pass a list with length equal to the number of columns + when calling get_dummies on a DataFrame. Alternatively, `prefix` + can be a dictionary mapping column names to prefixes. + prefix_sep : str, default '_' + If appending prefix, separator/delimiter to use. Or pass a + list or dictionary as with `prefix`. + dummy_na : bool, default False + Add a column to indicate NaNs, if False NaNs are ignored. + columns : list-like, default None + Column names in the DataFrame to be encoded. + If `columns` is None then all the columns with + `object`, `string`, or `category` dtype will be converted. + sparse : bool, default False + Whether the dummy-encoded columns should be backed by + a :class:`SparseArray` (True) or a regular NumPy array (False). + drop_first : bool, default False + Whether to get k-1 dummies out of k categorical levels by removing the + first level. + dtype : dtype, default bool + Data type for new columns. Only a single dtype is allowed. + + Returns + ------- + DataFrame + Dummy-coded data. If `data` contains other columns than the + dummy-coded one(s), these will be prepended, unaltered, to the result. + + See Also + -------- + Series.str.get_dummies : Convert Series of strings to dummy codes. + :func:`~pandas.from_dummies` : Convert dummy codes to categorical ``DataFrame``. + + Notes + ----- + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> s = pd.Series(list('abca')) + + >>> pd.get_dummies(s) + a b c + 0 True False False + 1 False True False + 2 False False True + 3 True False False + + >>> s1 = ['a', 'b', np.nan] + + >>> pd.get_dummies(s1) + a b + 0 True False + 1 False True + 2 False False + + >>> pd.get_dummies(s1, dummy_na=True) + a b NaN + 0 True False False + 1 False True False + 2 False False True + + >>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], + ... 
'C': [1, 2, 3]}) + + >>> pd.get_dummies(df, prefix=['col1', 'col2']) + C col1_a col1_b col2_a col2_b col2_c + 0 1 True False False True False + 1 2 False True True False False + 2 3 True False False False True + + >>> pd.get_dummies(pd.Series(list('abcaa'))) + a b c + 0 True False False + 1 False True False + 2 False False True + 3 True False False + 4 True False False + + >>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True) + b c + 0 False False + 1 True False + 2 False True + 3 False False + 4 False False + + >>> pd.get_dummies(pd.Series(list('abc')), dtype=float) + a b c + 0 1.0 0.0 0.0 + 1 0.0 1.0 0.0 + 2 0.0 0.0 1.0 + """ + from pandas.core.reshape.concat import concat + + dtypes_to_encode = ["object", "string", "category"] + + if isinstance(data, DataFrame): + # determine columns being encoded + if columns is None: + data_to_encode = data.select_dtypes(include=dtypes_to_encode) + elif not is_list_like(columns): + raise TypeError("Input must be a list-like for parameter `columns`") + else: + data_to_encode = data[columns] + + # validate prefixes and separator to avoid silently dropping cols + def check_len(item, name: str): + if is_list_like(item): + if not len(item) == data_to_encode.shape[1]: + len_msg = ( + f"Length of '{name}' ({len(item)}) did not match the " + "length of the columns being encoded " + f"({data_to_encode.shape[1]})." + ) + raise ValueError(len_msg) + + check_len(prefix, "prefix") + check_len(prefix_sep, "prefix_sep") + + if isinstance(prefix, str): + prefix = itertools.cycle([prefix]) + if isinstance(prefix, dict): + prefix = [prefix[col] for col in data_to_encode.columns] + + if prefix is None: + prefix = data_to_encode.columns + + # validate separators + if isinstance(prefix_sep, str): + prefix_sep = itertools.cycle([prefix_sep]) + elif isinstance(prefix_sep, dict): + prefix_sep = [prefix_sep[col] for col in data_to_encode.columns] + + with_dummies: list[DataFrame] + if data_to_encode.shape == data.shape: + # Encoding the entire df, do not prepend any dropped columns + with_dummies = [] + elif columns is not None: + # Encoding only cols specified in columns. Get all cols not in + # columns to prepend to result. + with_dummies = [data.drop(columns, axis=1)] + else: + # Encoding only object and category dtype columns. Get remaining + # columns to prepend to result. 
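This default branch encodes only the object/string/category columns and prepends everything else untouched; the explicit ``columns=`` branch above does the same for a user-chosen subset. A usage sketch (illustrative only):

import pandas as pd

df = pd.DataFrame({"A": ["a", "b", "a"], "C": [1, 2, 3]})

# With no `columns=`, only the object column "A" is encoded;
# the numeric column "C" is prepended unchanged
print(pd.get_dummies(df))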
+ with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)] + + for col, pre, sep in zip(data_to_encode.items(), prefix, prefix_sep): + # col is (column_name, column), use just column data here + dummy = _get_dummies_1d( + col[1], + prefix=pre, + prefix_sep=sep, + dummy_na=dummy_na, + sparse=sparse, + drop_first=drop_first, + dtype=dtype, + ) + with_dummies.append(dummy) + result = concat(with_dummies, axis=1) + else: + result = _get_dummies_1d( + data, + prefix, + prefix_sep, + dummy_na, + sparse=sparse, + drop_first=drop_first, + dtype=dtype, + ) + return result + + +def _get_dummies_1d( + data, + prefix, + prefix_sep: str | Iterable[str] | dict[str, str] = "_", + dummy_na: bool = False, + sparse: bool = False, + drop_first: bool = False, + dtype: NpDtype | None = None, +) -> DataFrame: + from pandas.core.reshape.concat import concat + + # Series avoids inconsistent NaN handling + codes, levels = factorize_from_iterable(Series(data, copy=False)) + + if dtype is None and hasattr(data, "dtype"): + input_dtype = data.dtype + if isinstance(input_dtype, CategoricalDtype): + input_dtype = input_dtype.categories.dtype + + if isinstance(input_dtype, ArrowDtype): + import pyarrow as pa + + dtype = ArrowDtype(pa.bool_()) # type: ignore[assignment] + elif ( + isinstance(input_dtype, StringDtype) + and input_dtype.storage != "pyarrow_numpy" + ): + dtype = pandas_dtype("boolean") # type: ignore[assignment] + else: + dtype = np.dtype(bool) + elif dtype is None: + dtype = np.dtype(bool) + + _dtype = pandas_dtype(dtype) + + if is_object_dtype(_dtype): + raise ValueError("dtype=object is not a valid dtype for get_dummies") + + def get_empty_frame(data) -> DataFrame: + index: Index | np.ndarray + if isinstance(data, Series): + index = data.index + else: + index = default_index(len(data)) + return DataFrame(index=index) + + # if all NaN + if not dummy_na and len(levels) == 0: + return get_empty_frame(data) + + codes = codes.copy() + if dummy_na: + codes[codes == -1] = len(levels) + levels = levels.insert(len(levels), np.nan) + + # if dummy_na, we just fake a nan level. 
drop_first will drop it again + if drop_first and len(levels) == 1: + return get_empty_frame(data) + + number_of_cols = len(levels) + + if prefix is None: + dummy_cols = levels + else: + dummy_cols = Index([f"{prefix}{prefix_sep}{level}" for level in levels]) + + index: Index | None + if isinstance(data, Series): + index = data.index + else: + index = None + + if sparse: + fill_value: bool | float + if is_integer_dtype(dtype): + fill_value = 0 + elif dtype == np.dtype(bool): + fill_value = False + else: + fill_value = 0.0 + + sparse_series = [] + N = len(data) + sp_indices: list[list] = [[] for _ in range(len(dummy_cols))] + mask = codes != -1 + codes = codes[mask] + n_idx = np.arange(N)[mask] + + for ndx, code in zip(n_idx, codes): + sp_indices[code].append(ndx) + + if drop_first: + # remove first categorical level to avoid perfect collinearity + # GH12042 + sp_indices = sp_indices[1:] + dummy_cols = dummy_cols[1:] + for col, ixs in zip(dummy_cols, sp_indices): + sarr = SparseArray( + np.ones(len(ixs), dtype=dtype), + sparse_index=IntIndex(N, ixs), + fill_value=fill_value, + dtype=dtype, + ) + sparse_series.append(Series(data=sarr, index=index, name=col, copy=False)) + + return concat(sparse_series, axis=1, copy=False) + + else: + # ensure ndarray layout is column-major + shape = len(codes), number_of_cols + dummy_dtype: NpDtype + if isinstance(_dtype, np.dtype): + dummy_dtype = _dtype + else: + dummy_dtype = np.bool_ + dummy_mat = np.zeros(shape=shape, dtype=dummy_dtype, order="F") + dummy_mat[np.arange(len(codes)), codes] = 1 + + if not dummy_na: + # reset NaN GH4446 + dummy_mat[codes == -1] = 0 + + if drop_first: + # remove first GH12042 + dummy_mat = dummy_mat[:, 1:] + dummy_cols = dummy_cols[1:] + return DataFrame(dummy_mat, index=index, columns=dummy_cols, dtype=_dtype) + + +def from_dummies( + data: DataFrame, + sep: None | str = None, + default_category: None | Hashable | dict[str, Hashable] = None, +) -> DataFrame: + """ + Create a categorical ``DataFrame`` from a ``DataFrame`` of dummy variables. + + Inverts the operation performed by :func:`~pandas.get_dummies`. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + data : DataFrame + Data which contains dummy-coded variables in form of integer columns of + 1's and 0's. + sep : str, default None + Separator used in the column names of the dummy categories they are + character indicating the separation of the categorical names from the prefixes. + For example, if your column names are 'prefix_A' and 'prefix_B', + you can strip the underscore by specifying sep='_'. + default_category : None, Hashable or dict of Hashables, default None + The default category is the implied category when a value has none of the + listed categories specified with a one, i.e. if all dummies in a row are + zero. Can be a single value for all variables or a dict directly mapping + the default categories to a prefix of a variable. + + Returns + ------- + DataFrame + Categorical data decoded from the dummy input-data. + + Raises + ------ + ValueError + * When the input ``DataFrame`` ``data`` contains NA values. + * When the input ``DataFrame`` ``data`` contains column names with separators + that do not match the separator specified with ``sep``. + * When a ``dict`` passed to ``default_category`` does not include an implied + category for each prefix. + * When a value in ``data`` has more than one category assigned to it. + * When ``default_category=None`` and a value in ``data`` has no category + assigned to it. 
+ TypeError + * When the input ``data`` is not of type ``DataFrame``. + * When the input ``DataFrame`` ``data`` contains non-dummy data. + * When the passed ``sep`` is of a wrong data type. + * When the passed ``default_category`` is of a wrong data type. + + See Also + -------- + :func:`~pandas.get_dummies` : Convert ``Series`` or ``DataFrame`` to dummy codes. + :class:`~pandas.Categorical` : Represent a categorical variable in classic. + + Notes + ----- + The columns of the passed dummy data should only include 1's and 0's, + or boolean values. + + Examples + -------- + >>> df = pd.DataFrame({"a": [1, 0, 0, 1], "b": [0, 1, 0, 0], + ... "c": [0, 0, 1, 0]}) + + >>> df + a b c + 0 1 0 0 + 1 0 1 0 + 2 0 0 1 + 3 1 0 0 + + >>> pd.from_dummies(df) + 0 a + 1 b + 2 c + 3 a + + >>> df = pd.DataFrame({"col1_a": [1, 0, 1], "col1_b": [0, 1, 0], + ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], + ... "col2_c": [0, 0, 1]}) + + >>> df + col1_a col1_b col2_a col2_b col2_c + 0 1 0 0 1 0 + 1 0 1 1 0 0 + 2 1 0 0 0 1 + + >>> pd.from_dummies(df, sep="_") + col1 col2 + 0 a b + 1 b a + 2 a c + + >>> df = pd.DataFrame({"col1_a": [1, 0, 0], "col1_b": [0, 1, 0], + ... "col2_a": [0, 1, 0], "col2_b": [1, 0, 0], + ... "col2_c": [0, 0, 0]}) + + >>> df + col1_a col1_b col2_a col2_b col2_c + 0 1 0 0 1 0 + 1 0 1 1 0 0 + 2 0 0 0 0 0 + + >>> pd.from_dummies(df, sep="_", default_category={"col1": "d", "col2": "e"}) + col1 col2 + 0 a b + 1 b a + 2 d e + """ + from pandas.core.reshape.concat import concat + + if not isinstance(data, DataFrame): + raise TypeError( + "Expected 'data' to be a 'DataFrame'; " + f"Received 'data' of type: {type(data).__name__}" + ) + + col_isna_mask = cast(Series, data.isna().any()) + + if col_isna_mask.any(): + raise ValueError( + "Dummy DataFrame contains NA value in column: " + f"'{col_isna_mask.idxmax()}'" + ) + + # index data with a list of all columns that are dummies + try: + data_to_decode = data.astype("boolean", copy=False) + except TypeError: + raise TypeError("Passed DataFrame contains non-dummy data") + + # collect prefixes and get lists to slice data for each prefix + variables_slice = defaultdict(list) + if sep is None: + variables_slice[""] = list(data.columns) + elif isinstance(sep, str): + for col in data_to_decode.columns: + prefix = col.split(sep)[0] + if len(prefix) == len(col): + raise ValueError(f"Separator not specified for column: {col}") + variables_slice[prefix].append(col) + else: + raise TypeError( + "Expected 'sep' to be of type 'str' or 'None'; " + f"Received 'sep' of type: {type(sep).__name__}" + ) + + if default_category is not None: + if isinstance(default_category, dict): + if not len(default_category) == len(variables_slice): + len_msg = ( + f"Length of 'default_category' ({len(default_category)}) " + f"did not match the length of the columns being encoded " + f"({len(variables_slice)})" + ) + raise ValueError(len_msg) + elif isinstance(default_category, Hashable): + default_category = dict( + zip(variables_slice, [default_category] * len(variables_slice)) + ) + else: + raise TypeError( + "Expected 'default_category' to be of type " + "'None', 'Hashable', or 'dict'; " + "Received 'default_category' of type: " + f"{type(default_category).__name__}" + ) + + cat_data = {} + for prefix, prefix_slice in variables_slice.items(): + if sep is None: + cats = prefix_slice.copy() + else: + cats = [col[len(prefix + sep) :] for col in prefix_slice] + assigned = data_to_decode.loc[:, prefix_slice].sum(axis=1) + if any(assigned > 1): + raise ValueError( + "Dummy DataFrame 
contains multi-assignment(s); " + f"First instance in row: {assigned.idxmax()}" + ) + if any(assigned == 0): + if isinstance(default_category, dict): + cats.append(default_category[prefix]) + else: + raise ValueError( + "Dummy DataFrame contains unassigned value(s); " + f"First instance in row: {assigned.idxmin()}" + ) + data_slice = concat( + (data_to_decode.loc[:, prefix_slice], assigned == 0), axis=1 + ) + else: + data_slice = data_to_decode.loc[:, prefix_slice] + cats_array = data._constructor_sliced(cats, dtype=data.columns.dtype) + # get indices of True entries along axis=1 + true_values = data_slice.idxmax(axis=1) + indexer = data_slice.columns.get_indexer_for(true_values) + cat_data[prefix] = cats_array.take(indexer).set_axis(data.index) + + result = DataFrame(cat_data) + if sep is not None: + result.columns = result.columns.astype(data.columns.dtype) + return result diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/melt.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/melt.py new file mode 100644 index 0000000000000000000000000000000000000000..e54f847895f1a42cf1782392da684f2bcfa7e81c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/reshape/melt.py @@ -0,0 +1,512 @@ +from __future__ import annotations + +import re +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.util._decorators import Appender + +from pandas.core.dtypes.common import is_list_like +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.missing import notna + +import pandas.core.algorithms as algos +from pandas.core.indexes.api import MultiIndex +from pandas.core.reshape.concat import concat +from pandas.core.reshape.util import tile_compat +from pandas.core.shared_docs import _shared_docs +from pandas.core.tools.numeric import to_numeric + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import AnyArrayLike + + from pandas import DataFrame + + +def ensure_list_vars(arg_vars, variable: str, columns) -> list: + if arg_vars is not None: + if not is_list_like(arg_vars): + return [arg_vars] + elif isinstance(columns, MultiIndex) and not isinstance(arg_vars, list): + raise ValueError( + f"{variable} must be a list of tuples when columns are a MultiIndex" + ) + else: + return list(arg_vars) + else: + return [] + + +@Appender(_shared_docs["melt"] % {"caller": "pd.melt(df, ", "other": "DataFrame.melt"}) +def melt( + frame: DataFrame, + id_vars=None, + value_vars=None, + var_name=None, + value_name: Hashable = "value", + col_level=None, + ignore_index: bool = True, +) -> DataFrame: + if value_name in frame.columns: + raise ValueError( + f"value_name ({value_name}) cannot match an element in " + "the DataFrame columns." 
+ ) + id_vars = ensure_list_vars(id_vars, "id_vars", frame.columns) + value_vars_was_not_none = value_vars is not None + value_vars = ensure_list_vars(value_vars, "value_vars", frame.columns) + + if id_vars or value_vars: + if col_level is not None: + level = frame.columns.get_level_values(col_level) + else: + level = frame.columns + labels = id_vars + value_vars + idx = level.get_indexer_for(labels) + missing = idx == -1 + if missing.any(): + missing_labels = [ + lab for lab, not_found in zip(labels, missing) if not_found + ] + raise KeyError( + "The following id_vars or value_vars are not present in " + f"the DataFrame: {missing_labels}" + ) + if value_vars_was_not_none: + frame = frame.iloc[:, algos.unique(idx)] + else: + frame = frame.copy() + else: + frame = frame.copy() + + if col_level is not None: # allow list or other? + # frame is a copy + frame.columns = frame.columns.get_level_values(col_level) + + if var_name is None: + if isinstance(frame.columns, MultiIndex): + if len(frame.columns.names) == len(set(frame.columns.names)): + var_name = frame.columns.names + else: + var_name = [f"variable_{i}" for i in range(len(frame.columns.names))] + else: + var_name = [ + frame.columns.name if frame.columns.name is not None else "variable" + ] + elif is_list_like(var_name): + raise ValueError(f"{var_name=} must be a scalar.") + else: + var_name = [var_name] + + num_rows, K = frame.shape + num_cols_adjusted = K - len(id_vars) + + mdata: dict[Hashable, AnyArrayLike] = {} + for col in id_vars: + id_data = frame.pop(col) + if not isinstance(id_data.dtype, np.dtype): + # i.e. ExtensionDtype + if num_cols_adjusted > 0: + mdata[col] = concat([id_data] * num_cols_adjusted, ignore_index=True) + else: + # We can't concat empty list. (GH 46044) + mdata[col] = type(id_data)([], name=id_data.name, dtype=id_data.dtype) + else: + mdata[col] = np.tile(id_data._values, num_cols_adjusted) + + mcolumns = id_vars + var_name + [value_name] + + if frame.shape[1] > 0 and not any( + not isinstance(dt, np.dtype) and dt._supports_2d for dt in frame.dtypes + ): + mdata[value_name] = concat( + [frame.iloc[:, i] for i in range(frame.shape[1])] + ).values + else: + mdata[value_name] = frame._values.ravel("F") + for i, col in enumerate(var_name): + mdata[col] = frame.columns._get_level_values(i).repeat(num_rows) + + result = frame._constructor(mdata, columns=mcolumns) + + if not ignore_index: + result.index = tile_compat(frame.index, num_cols_adjusted) + + return result + + +def lreshape(data: DataFrame, groups: dict, dropna: bool = True) -> DataFrame: + """ + Reshape wide-format data to long. Generalized inverse of DataFrame.pivot. + + Accepts a dictionary, ``groups``, in which each key is a new column name + and each value is a list of old column names that will be "melted" under + the new column name as part of the reshape. + + Parameters + ---------- + data : DataFrame + The wide-format DataFrame. + groups : dict + {new_name : list_of_columns}. + dropna : bool, default True + Do not include columns whose entries are all NaN. + + Returns + ------- + DataFrame + Reshaped DataFrame. + + See Also + -------- + melt : Unpivot a DataFrame from wide to long format, optionally leaving + identifiers set. + pivot : Create a spreadsheet-style pivot table as a DataFrame. + DataFrame.pivot : Pivot without aggregation that can handle + non-numeric data. + DataFrame.pivot_table : Generalization of pivot that can handle + duplicate values for one index/column pair. 
+ DataFrame.unstack : Pivot based on the index values instead of a + column. + wide_to_long : Wide panel to long format. Less flexible but more + user-friendly than melt. + + Examples + -------- + >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], + ... 'team': ['Red Sox', 'Yankees'], + ... 'year1': [2007, 2007], 'year2': [2008, 2008]}) + >>> data + hr1 hr2 team year1 year2 + 0 514 545 Red Sox 2007 2008 + 1 573 526 Yankees 2007 2008 + + >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) + team year hr + 0 Red Sox 2007 514 + 1 Yankees 2007 573 + 2 Red Sox 2008 545 + 3 Yankees 2008 526 + """ + mdata = {} + pivot_cols = [] + all_cols: set[Hashable] = set() + K = len(next(iter(groups.values()))) + for target, names in groups.items(): + if len(names) != K: + raise ValueError("All column lists must be same length") + to_concat = [data[col]._values for col in names] + + mdata[target] = concat_compat(to_concat) + pivot_cols.append(target) + all_cols = all_cols.union(names) + + id_cols = list(data.columns.difference(all_cols)) + for col in id_cols: + mdata[col] = np.tile(data[col]._values, K) + + if dropna: + mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) + for c in pivot_cols: + mask &= notna(mdata[c]) + if not mask.all(): + mdata = {k: v[mask] for k, v in mdata.items()} + + return data._constructor(mdata, columns=id_cols + pivot_cols) + + +def wide_to_long( + df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+" +) -> DataFrame: + r""" + Unpivot a DataFrame from wide to long format. + + Less flexible but more user-friendly than melt. + + With stubnames ['A', 'B'], this function expects to find one or more + group of columns with format + A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... + You specify what you want to call this suffix in the resulting long format + with `j` (for example `j='year'`) + + Each row of these wide variables are assumed to be uniquely identified by + `i` (can be a single column name or a list of column names) + + All remaining variables in the data frame are left intact. + + Parameters + ---------- + df : DataFrame + The wide-format DataFrame. + stubnames : str or list-like + The stub name(s). The wide format variables are assumed to + start with the stub names. + i : str or list-like + Column(s) to use as id variable(s). + j : str + The name of the sub-observation variable. What you wish to name your + suffix in the long format. + sep : str, default "" + A character indicating the separation of the variable names + in the wide format, to be stripped from the names in the long format. + For example, if your column names are A-suffix1, A-suffix2, you + can strip the hyphen by specifying `sep='-'`. + suffix : str, default '\\d+' + A regular expression capturing the wanted suffixes. '\\d+' captures + numeric suffixes. Suffixes with no numbers could be specified with the + negated character class '\\D+'. You can also further disambiguate + suffixes, for example, if your wide variables are of the form A-one, + B-two,.., and you have an unrelated column A-rating, you can ignore the + last one by specifying `suffix='(!?one|two)'`. When all suffixes are + numeric, they are cast to int64/float64. + + Returns + ------- + DataFrame + A DataFrame that contains each stub name as a variable, with new index + (i, j). + + See Also + -------- + melt : Unpivot a DataFrame from wide to long format, optionally leaving + identifiers set. + pivot : Create a spreadsheet-style pivot table as a DataFrame. 
+ DataFrame.pivot : Pivot without aggregation that can handle + non-numeric data. + DataFrame.pivot_table : Generalization of pivot that can handle + duplicate values for one index/column pair. + DataFrame.unstack : Pivot based on the index values instead of a + column. + + Notes + ----- + All extra variables are left untouched. This simply uses + `pandas.melt` under the hood, but is hard-coded to "do the right thing" + in a typical case. + + Examples + -------- + >>> np.random.seed(123) + >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, + ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, + ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, + ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, + ... "X" : dict(zip(range(3), np.random.randn(3))) + ... }) + >>> df["id"] = df.index + >>> df + A1970 A1980 B1970 B1980 X id + 0 a d 2.5 3.2 -1.085631 0 + 1 b e 1.2 1.3 0.997345 1 + 2 c f 0.7 0.1 0.282978 2 + >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") + ... # doctest: +NORMALIZE_WHITESPACE + X A B + id year + 0 1970 -1.085631 a 2.5 + 1 1970 0.997345 b 1.2 + 2 1970 0.282978 c 0.7 + 0 1980 -1.085631 d 3.2 + 1 1980 0.997345 e 1.3 + 2 1980 0.282978 f 0.1 + + With multiple id columns + + >>> df = pd.DataFrame({ + ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], + ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], + ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], + ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] + ... }) + >>> df + famid birth ht1 ht2 + 0 1 1 2.8 3.4 + 1 1 2 2.9 3.8 + 2 1 3 2.2 2.9 + 3 2 1 2.0 3.2 + 4 2 2 1.8 2.8 + 5 2 3 1.9 2.4 + 6 3 1 2.2 3.3 + 7 3 2 2.3 3.4 + 8 3 3 2.1 2.9 + >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') + >>> l + ... # doctest: +NORMALIZE_WHITESPACE + ht + famid birth age + 1 1 1 2.8 + 2 3.4 + 2 1 2.9 + 2 3.8 + 3 1 2.2 + 2 2.9 + 2 1 1 2.0 + 2 3.2 + 2 1 1.8 + 2 2.8 + 3 1 1.9 + 2 2.4 + 3 1 1 2.2 + 2 3.3 + 2 1 2.3 + 2 3.4 + 3 1 2.1 + 2 2.9 + + Going from long back to wide just takes some creative use of `unstack` + + >>> w = l.unstack() + >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format) + >>> w.reset_index() + famid birth ht1 ht2 + 0 1 1 2.8 3.4 + 1 1 2 2.9 3.8 + 2 1 3 2.2 2.9 + 3 2 1 2.0 3.2 + 4 2 2 1.8 2.8 + 5 2 3 1.9 2.4 + 6 3 1 2.2 3.3 + 7 3 2 2.3 3.4 + 8 3 3 2.1 2.9 + + Less wieldy column names are also handled + + >>> np.random.seed(0) + >>> df = pd.DataFrame({'A(weekly)-2010': np.random.rand(3), + ... 'A(weekly)-2011': np.random.rand(3), + ... 'B(weekly)-2010': np.random.rand(3), + ... 'B(weekly)-2011': np.random.rand(3), + ... 'X' : np.random.randint(3, size=3)}) + >>> df['id'] = df.index + >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS + A(weekly)-2010 A(weekly)-2011 B(weekly)-2010 B(weekly)-2011 X id + 0 0.548814 0.544883 0.437587 0.383442 0 0 + 1 0.715189 0.423655 0.891773 0.791725 1 1 + 2 0.602763 0.645894 0.963663 0.528895 1 2 + + >>> pd.wide_to_long(df, ['A(weekly)', 'B(weekly)'], i='id', + ... j='year', sep='-') + ... # doctest: +NORMALIZE_WHITESPACE + X A(weekly) B(weekly) + id year + 0 2010 0 0.548814 0.437587 + 1 2010 1 0.715189 0.891773 + 2 2010 1 0.602763 0.963663 + 0 2011 0 0.544883 0.383442 + 1 2011 1 0.423655 0.791725 + 2 2011 1 0.645894 0.528895 + + If we have many columns, we could also use a regex to find our + stubnames and pass that list on to wide_to_long + + >>> stubnames = sorted( + ... set([match[0] for match in df.columns.str.findall( + ... r'[A-B]\(.*\)').values if match != []]) + ... ) + >>> list(stubnames) + ['A(weekly)', 'B(weekly)'] + + All of the above examples have integers as suffixes. 
It is possible to + have non-integers as suffixes. + + >>> df = pd.DataFrame({ + ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], + ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], + ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], + ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] + ... }) + >>> df + famid birth ht_one ht_two + 0 1 1 2.8 3.4 + 1 1 2 2.9 3.8 + 2 1 3 2.2 2.9 + 3 2 1 2.0 3.2 + 4 2 2 1.8 2.8 + 5 2 3 1.9 2.4 + 6 3 1 2.2 3.3 + 7 3 2 2.3 3.4 + 8 3 3 2.1 2.9 + + >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', + ... sep='_', suffix=r'\w+') + >>> l + ... # doctest: +NORMALIZE_WHITESPACE + ht + famid birth age + 1 1 one 2.8 + two 3.4 + 2 one 2.9 + two 3.8 + 3 one 2.2 + two 2.9 + 2 1 one 2.0 + two 3.2 + 2 one 1.8 + two 2.8 + 3 one 1.9 + two 2.4 + 3 1 one 2.2 + two 3.3 + 2 one 2.3 + two 3.4 + 3 one 2.1 + two 2.9 + """ + + def get_var_names(df, stub: str, sep: str, suffix: str): + regex = rf"^{re.escape(stub)}{re.escape(sep)}{suffix}$" + return df.columns[df.columns.str.match(regex)] + + def melt_stub(df, stub: str, i, j, value_vars, sep: str): + newdf = melt( + df, + id_vars=i, + value_vars=value_vars, + value_name=stub.rstrip(sep), + var_name=j, + ) + newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "", regex=True) + + # GH17627 Cast numerics suffixes to int/float + try: + newdf[j] = to_numeric(newdf[j]) + except (TypeError, ValueError, OverflowError): + # TODO: anything else to catch? + pass + + return newdf.set_index(i + [j]) + + if not is_list_like(stubnames): + stubnames = [stubnames] + else: + stubnames = list(stubnames) + + if df.columns.isin(stubnames).any(): + raise ValueError("stubname can't be identical to a column name") + + if not is_list_like(i): + i = [i] + else: + i = list(i) + + if df[i].duplicated().any(): + raise ValueError("the id variables need to uniquely identify each row") + + _melted = [] + value_vars_flattened = [] + for stub in stubnames: + value_var = get_var_names(df, stub, sep, suffix) + value_vars_flattened.extend(value_var) + _melted.append(melt_stub(df, stub, i, j, value_var, sep)) + + melted = concat(_melted, axis=1) + id_vars = df.columns.difference(value_vars_flattened) + new = df[id_vars] + + if len(i) == 1: + return new.set_index(i).join(melted) + else: + return new.merge(melted.reset_index(), on=i).set_index(i + [j]) diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/merge.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/merge.py new file mode 100644 index 0000000000000000000000000000000000000000..646f40f6141d836a8ecf5354e56203010f00f6d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/reshape/merge.py @@ -0,0 +1,2762 @@ +""" +SQL-style merge routines +""" +from __future__ import annotations + +from collections.abc import ( + Hashable, + Sequence, +) +import datetime +from functools import partial +from typing import ( + TYPE_CHECKING, + Literal, + cast, + final, +) +import uuid +import warnings + +import numpy as np + +from pandas._libs import ( + Timedelta, + hashtable as libhashtable, + join as libjoin, + lib, +) +from pandas._libs.lib import is_range_indexer +from pandas._typing import ( + AnyArrayLike, + ArrayLike, + IndexLabel, + JoinHow, + MergeHow, + Shape, + Suffixes, + npt, +) +from pandas.errors import MergeError +from pandas.util._decorators import ( + Appender, + Substitution, + cache_readonly, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.cast import 
find_common_type +from pandas.core.dtypes.common import ( + ensure_int64, + ensure_object, + is_bool, + is_bool_dtype, + is_float_dtype, + is_integer, + is_integer_dtype, + is_list_like, + is_number, + is_numeric_dtype, + is_object_dtype, + is_string_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, +) + +from pandas import ( + ArrowDtype, + Categorical, + Index, + MultiIndex, + Series, +) +import pandas.core.algorithms as algos +from pandas.core.arrays import ( + ArrowExtensionArray, + BaseMaskedArray, + ExtensionArray, +) +from pandas.core.arrays.string_ import StringDtype +import pandas.core.common as com +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.frame import _merge_doc +from pandas.core.indexes.api import default_index +from pandas.core.sorting import ( + get_group_index, + is_int64_overflow_possible, +) + +if TYPE_CHECKING: + from pandas import DataFrame + from pandas.core import groupby + from pandas.core.arrays import DatetimeArray + from pandas.core.indexes.frozen import FrozenList + +_factorizers = { + np.int64: libhashtable.Int64Factorizer, + np.longlong: libhashtable.Int64Factorizer, + np.int32: libhashtable.Int32Factorizer, + np.int16: libhashtable.Int16Factorizer, + np.int8: libhashtable.Int8Factorizer, + np.uint64: libhashtable.UInt64Factorizer, + np.uint32: libhashtable.UInt32Factorizer, + np.uint16: libhashtable.UInt16Factorizer, + np.uint8: libhashtable.UInt8Factorizer, + np.bool_: libhashtable.UInt8Factorizer, + np.float64: libhashtable.Float64Factorizer, + np.float32: libhashtable.Float32Factorizer, + np.complex64: libhashtable.Complex64Factorizer, + np.complex128: libhashtable.Complex128Factorizer, + np.object_: libhashtable.ObjectFactorizer, +} + +# See https://github.com/pandas-dev/pandas/issues/52451 +if np.intc is not np.int32: + _factorizers[np.intc] = libhashtable.Int64Factorizer + +_known = (np.ndarray, ExtensionArray, Index, ABCSeries) + + +@Substitution("\nleft : DataFrame or named Series") +@Appender(_merge_doc, indents=0) +def merge( + left: DataFrame | Series, + right: DataFrame | Series, + how: MergeHow = "inner", + on: IndexLabel | AnyArrayLike | None = None, + left_on: IndexLabel | AnyArrayLike | None = None, + right_on: IndexLabel | AnyArrayLike | None = None, + left_index: bool = False, + right_index: bool = False, + sort: bool = False, + suffixes: Suffixes = ("_x", "_y"), + copy: bool | None = None, + indicator: str | bool = False, + validate: str | None = None, +) -> DataFrame: + left_df = _validate_operand(left) + right_df = _validate_operand(right) + if how == "cross": + return _cross_merge( + left_df, + right_df, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + sort=sort, + suffixes=suffixes, + indicator=indicator, + validate=validate, + copy=copy, + ) + else: + op = _MergeOperation( + left_df, + right_df, + how=how, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + sort=sort, + suffixes=suffixes, + indicator=indicator, + validate=validate, + ) + return op.get_result(copy=copy) + + +def _cross_merge( + left: DataFrame, + right: DataFrame, + on: IndexLabel | AnyArrayLike | None = None, + left_on: IndexLabel | AnyArrayLike | None = None, + right_on: IndexLabel | 
AnyArrayLike | None = None, + left_index: bool = False, + right_index: bool = False, + sort: bool = False, + suffixes: Suffixes = ("_x", "_y"), + copy: bool | None = None, + indicator: str | bool = False, + validate: str | None = None, +) -> DataFrame: + """ + See merge.__doc__ with how='cross' + """ + + if ( + left_index + or right_index + or right_on is not None + or left_on is not None + or on is not None + ): + raise MergeError( + "Can not pass on, right_on, left_on or set right_index=True or " + "left_index=True" + ) + + cross_col = f"_cross_{uuid.uuid4()}" + left = left.assign(**{cross_col: 1}) + right = right.assign(**{cross_col: 1}) + + left_on = right_on = [cross_col] + + res = merge( + left, + right, + how="inner", + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + sort=sort, + suffixes=suffixes, + indicator=indicator, + validate=validate, + copy=copy, + ) + del res[cross_col] + return res + + +def _groupby_and_merge( + by, left: DataFrame | Series, right: DataFrame | Series, merge_pieces +): + """ + groupby & merge; we are always performing a left-by type operation + + Parameters + ---------- + by: field to group + left: DataFrame + right: DataFrame + merge_pieces: function for merging + """ + pieces = [] + if not isinstance(by, (list, tuple)): + by = [by] + + lby = left.groupby(by, sort=False) + rby: groupby.DataFrameGroupBy | groupby.SeriesGroupBy | None = None + + # if we can groupby the rhs + # then we can get vastly better perf + if all(item in right.columns for item in by): + rby = right.groupby(by, sort=False) + + for key, lhs in lby._grouper.get_iterator(lby._selected_obj, axis=lby.axis): + if rby is None: + rhs = right + else: + try: + rhs = right.take(rby.indices[key]) + except KeyError: + # key doesn't exist in left + lcols = lhs.columns.tolist() + cols = lcols + [r for r in right.columns if r not in set(lcols)] + merged = lhs.reindex(columns=cols) + merged.index = range(len(merged)) + pieces.append(merged) + continue + + merged = merge_pieces(lhs, rhs) + + # make sure join keys are in the merged + # TODO, should merge_pieces do this? + merged[by] = key + + pieces.append(merged) + + # preserve the original order + # if we have a missing piece this can be reset + from pandas.core.reshape.concat import concat + + result = concat(pieces, ignore_index=True) + result = result.reindex(columns=pieces[0].columns, copy=False) + return result, lby + + +def merge_ordered( + left: DataFrame | Series, + right: DataFrame | Series, + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_by=None, + right_by=None, + fill_method: str | None = None, + suffixes: Suffixes = ("_x", "_y"), + how: JoinHow = "outer", +) -> DataFrame: + """ + Perform a merge for ordered data with optional filling/interpolation. + + Designed for ordered data like time series data. Optionally + perform group-wise merge (see examples). + + Parameters + ---------- + left : DataFrame or named Series + right : DataFrame or named Series + on : label or list + Field names to join on. Must be found in both DataFrames. + left_on : label or list, or array-like + Field names to join on in left DataFrame. Can be a vector or list of + vectors of the length of the DataFrame to use a particular vector as + the join key instead of columns. + right_on : label or list, or array-like + Field names to join on in right DataFrame or vector/list of vectors per + left_on docs. 
+ left_by : column name or list of column names + Group left DataFrame by group columns and merge piece by piece with + right DataFrame. Must be None if either left or right are a Series. + right_by : column name or list of column names + Group right DataFrame by group columns and merge piece by piece with + left DataFrame. Must be None if either left or right are a Series. + fill_method : {'ffill', None}, default None + Interpolation method for data. + suffixes : list-like, default is ("_x", "_y") + A length-2 sequence where each element is optionally a string + indicating the suffix to add to overlapping column names in + `left` and `right` respectively. Pass a value of `None` instead + of a string to indicate that the column name from `left` or + `right` should be left as-is, with no suffix. At least one of the + values must not be None. + + how : {'left', 'right', 'outer', 'inner'}, default 'outer' + * left: use only keys from left frame (SQL: left outer join) + * right: use only keys from right frame (SQL: right outer join) + * outer: use union of keys from both frames (SQL: full outer join) + * inner: use intersection of keys from both frames (SQL: inner join). + + Returns + ------- + DataFrame + The merged DataFrame output type will be the same as + 'left', if it is a subclass of DataFrame. + + See Also + -------- + merge : Merge with a database-style join. + merge_asof : Merge on nearest keys. + + Examples + -------- + >>> from pandas import merge_ordered + >>> df1 = pd.DataFrame( + ... { + ... "key": ["a", "c", "e", "a", "c", "e"], + ... "lvalue": [1, 2, 3, 1, 2, 3], + ... "group": ["a", "a", "a", "b", "b", "b"] + ... } + ... ) + >>> df1 + key lvalue group + 0 a 1 a + 1 c 2 a + 2 e 3 a + 3 a 1 b + 4 c 2 b + 5 e 3 b + + >>> df2 = pd.DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]}) + >>> df2 + key rvalue + 0 b 1 + 1 c 2 + 2 d 3 + + >>> merge_ordered(df1, df2, fill_method="ffill", left_by="group") + key lvalue group rvalue + 0 a 1 a NaN + 1 b 1 a 1.0 + 2 c 2 a 2.0 + 3 d 2 a 3.0 + 4 e 3 a 3.0 + 5 a 1 b NaN + 6 b 1 b 1.0 + 7 c 2 b 2.0 + 8 d 2 b 3.0 + 9 e 3 b 3.0 + """ + + def _merger(x, y) -> DataFrame: + # perform the ordered merge operation + op = _OrderedMerge( + x, + y, + on=on, + left_on=left_on, + right_on=right_on, + suffixes=suffixes, + fill_method=fill_method, + how=how, + ) + return op.get_result() + + if left_by is not None and right_by is not None: + raise ValueError("Can only group either left or right frames") + if left_by is not None: + if isinstance(left_by, str): + left_by = [left_by] + check = set(left_by).difference(left.columns) + if len(check) != 0: + raise KeyError(f"{check} not found in left columns") + result, _ = _groupby_and_merge(left_by, left, right, lambda x, y: _merger(x, y)) + elif right_by is not None: + if isinstance(right_by, str): + right_by = [right_by] + check = set(right_by).difference(right.columns) + if len(check) != 0: + raise KeyError(f"{check} not found in right columns") + result, _ = _groupby_and_merge( + right_by, right, left, lambda x, y: _merger(y, x) + ) + else: + result = _merger(left, right) + return result + + +def merge_asof( + left: DataFrame | Series, + right: DataFrame | Series, + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + by=None, + left_by=None, + right_by=None, + suffixes: Suffixes = ("_x", "_y"), + tolerance: int | Timedelta | None = None, + allow_exact_matches: bool = True, + direction: str = 
"backward", +) -> DataFrame: + """ + Perform a merge by key distance. + + This is similar to a left-join except that we match on nearest + key rather than equal keys. Both DataFrames must be sorted by the key. + + For each row in the left DataFrame: + + - A "backward" search selects the last row in the right DataFrame whose + 'on' key is less than or equal to the left's key. + + - A "forward" search selects the first row in the right DataFrame whose + 'on' key is greater than or equal to the left's key. + + - A "nearest" search selects the row in the right DataFrame whose 'on' + key is closest in absolute distance to the left's key. + + Optionally match on equivalent keys with 'by' before searching with 'on'. + + Parameters + ---------- + left : DataFrame or named Series + right : DataFrame or named Series + on : label + Field name to join on. Must be found in both DataFrames. + The data MUST be ordered. Furthermore this must be a numeric column, + such as datetimelike, integer, or float. On or left_on/right_on + must be given. + left_on : label + Field name to join on in left DataFrame. + right_on : label + Field name to join on in right DataFrame. + left_index : bool + Use the index of the left DataFrame as the join key. + right_index : bool + Use the index of the right DataFrame as the join key. + by : column name or list of column names + Match on these columns before performing merge operation. + left_by : column name + Field names to match on in the left DataFrame. + right_by : column name + Field names to match on in the right DataFrame. + suffixes : 2-length sequence (tuple, list, ...) + Suffix to apply to overlapping column names in the left and right + side, respectively. + tolerance : int or Timedelta, optional, default None + Select asof tolerance within this range; must be compatible + with the merge index. + allow_exact_matches : bool, default True + + - If True, allow matching with the same 'on' value + (i.e. less-than-or-equal-to / greater-than-or-equal-to) + - If False, don't match the same 'on' value + (i.e., strictly less-than / strictly greater-than). + + direction : 'backward' (default), 'forward', or 'nearest' + Whether to search for prior, subsequent, or closest matches. + + Returns + ------- + DataFrame + + See Also + -------- + merge : Merge with a database-style join. + merge_ordered : Merge with optional filling/interpolation. + + Examples + -------- + >>> left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]}) + >>> left + a left_val + 0 1 a + 1 5 b + 2 10 c + + >>> right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]}) + >>> right + a right_val + 0 1 1 + 1 2 2 + 2 3 3 + 3 6 6 + 4 7 7 + + >>> pd.merge_asof(left, right, on="a") + a left_val right_val + 0 1 a 1 + 1 5 b 3 + 2 10 c 7 + + >>> pd.merge_asof(left, right, on="a", allow_exact_matches=False) + a left_val right_val + 0 1 a NaN + 1 5 b 3.0 + 2 10 c 7.0 + + >>> pd.merge_asof(left, right, on="a", direction="forward") + a left_val right_val + 0 1 a 1.0 + 1 5 b 6.0 + 2 10 c NaN + + >>> pd.merge_asof(left, right, on="a", direction="nearest") + a left_val right_val + 0 1 a 1 + 1 5 b 6 + 2 10 c 7 + + We can use indexed DataFrames as well. 
+ + >>> left = pd.DataFrame({"left_val": ["a", "b", "c"]}, index=[1, 5, 10]) + >>> left + left_val + 1 a + 5 b + 10 c + + >>> right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7]) + >>> right + right_val + 1 1 + 2 2 + 3 3 + 6 6 + 7 7 + + >>> pd.merge_asof(left, right, left_index=True, right_index=True) + left_val right_val + 1 a 1 + 5 b 3 + 10 c 7 + + Here is a real-world times-series example + + >>> quotes = pd.DataFrame( + ... { + ... "time": [ + ... pd.Timestamp("2016-05-25 13:30:00.023"), + ... pd.Timestamp("2016-05-25 13:30:00.023"), + ... pd.Timestamp("2016-05-25 13:30:00.030"), + ... pd.Timestamp("2016-05-25 13:30:00.041"), + ... pd.Timestamp("2016-05-25 13:30:00.048"), + ... pd.Timestamp("2016-05-25 13:30:00.049"), + ... pd.Timestamp("2016-05-25 13:30:00.072"), + ... pd.Timestamp("2016-05-25 13:30:00.075") + ... ], + ... "ticker": [ + ... "GOOG", + ... "MSFT", + ... "MSFT", + ... "MSFT", + ... "GOOG", + ... "AAPL", + ... "GOOG", + ... "MSFT" + ... ], + ... "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01], + ... "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03] + ... } + ... ) + >>> quotes + time ticker bid ask + 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 + 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 + 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 + 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 + 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 + 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 + 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 + 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 + + >>> trades = pd.DataFrame( + ... { + ... "time": [ + ... pd.Timestamp("2016-05-25 13:30:00.023"), + ... pd.Timestamp("2016-05-25 13:30:00.038"), + ... pd.Timestamp("2016-05-25 13:30:00.048"), + ... pd.Timestamp("2016-05-25 13:30:00.048"), + ... pd.Timestamp("2016-05-25 13:30:00.048") + ... ], + ... "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"], + ... "price": [51.95, 51.95, 720.77, 720.92, 98.0], + ... "quantity": [75, 155, 100, 100, 100] + ... } + ... ) + >>> trades + time ticker price quantity + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 + + By default we are taking the asof of the quotes + + >>> pd.merge_asof(trades, quotes, on="time", by="ticker") + time ticker price quantity bid ask + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN + + We only asof within 2ms between the quote time and the trade time + + >>> pd.merge_asof( + ... trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms") + ... ) + time ticker price quantity bid ask + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN + + We only asof within 10ms between the quote time and the trade time + and we exclude exact matches on time. However *prior* data will + propagate forward + + >>> pd.merge_asof( + ... trades, + ... quotes, + ... on="time", + ... by="ticker", + ... tolerance=pd.Timedelta("10ms"), + ... allow_exact_matches=False + ... 
) + time ticker price quantity bid ask + 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN + 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 + 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN + 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN + 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN + """ + op = _AsOfMerge( + left, + right, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + by=by, + left_by=left_by, + right_by=right_by, + suffixes=suffixes, + how="asof", + tolerance=tolerance, + allow_exact_matches=allow_exact_matches, + direction=direction, + ) + return op.get_result() + + +# TODO: transformations?? +# TODO: only copy DataFrames when modification necessary +class _MergeOperation: + """ + Perform a database (SQL) merge operation between two DataFrame or Series + objects using either columns as keys or their row indexes + """ + + _merge_type = "merge" + how: JoinHow | Literal["asof"] + on: IndexLabel | None + # left_on/right_on may be None when passed, but in validate_specification + # get replaced with non-None. + left_on: Sequence[Hashable | AnyArrayLike] + right_on: Sequence[Hashable | AnyArrayLike] + left_index: bool + right_index: bool + sort: bool + suffixes: Suffixes + copy: bool + indicator: str | bool + validate: str | None + join_names: list[Hashable] + right_join_keys: list[ArrayLike] + left_join_keys: list[ArrayLike] + + def __init__( + self, + left: DataFrame | Series, + right: DataFrame | Series, + how: JoinHow | Literal["asof"] = "inner", + on: IndexLabel | AnyArrayLike | None = None, + left_on: IndexLabel | AnyArrayLike | None = None, + right_on: IndexLabel | AnyArrayLike | None = None, + left_index: bool = False, + right_index: bool = False, + sort: bool = True, + suffixes: Suffixes = ("_x", "_y"), + indicator: str | bool = False, + validate: str | None = None, + ) -> None: + _left = _validate_operand(left) + _right = _validate_operand(right) + self.left = self.orig_left = _left + self.right = self.orig_right = _right + self.how = how + + self.on = com.maybe_make_list(on) + + self.suffixes = suffixes + self.sort = sort or how == "outer" + + self.left_index = left_index + self.right_index = right_index + + self.indicator = indicator + + if not is_bool(left_index): + raise ValueError( + f"left_index parameter must be of type bool, not {type(left_index)}" + ) + if not is_bool(right_index): + raise ValueError( + f"right_index parameter must be of type bool, not {type(right_index)}" + ) + + # GH 40993: raise when merging between different levels; enforced in 2.0 + if _left.columns.nlevels != _right.columns.nlevels: + msg = ( + "Not allowed to merge between different levels. " + f"({_left.columns.nlevels} levels on the left, " + f"{_right.columns.nlevels} on the right)" + ) + raise MergeError(msg) + + self.left_on, self.right_on = self._validate_left_right_on(left_on, right_on) + + ( + self.left_join_keys, + self.right_join_keys, + self.join_names, + left_drop, + right_drop, + ) = self._get_merge_keys() + + if left_drop: + self.left = self.left._drop_labels_or_levels(left_drop) + + if right_drop: + self.right = self.right._drop_labels_or_levels(right_drop) + + self._maybe_require_matching_dtypes(self.left_join_keys, self.right_join_keys) + self._validate_tolerance(self.left_join_keys) + + # validate the merge keys dtypes. We may need to coerce + # to avoid incompatible dtypes + self._maybe_coerce_merge_keys() + + # If argument passed to validate, + # check if columns specified as unique + # are in fact unique. 
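+        # e.g. pd.merge(df1, df2, on="key", validate="one_to_one") raises
+        # MergeError when "key" is duplicated on either side; the accepted
+        # spellings ("1:1", "1:m", "m:1", "m:m" and their long forms) are
+        # checked in _validate_validate_kwd.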
+ if validate is not None: + self._validate_validate_kwd(validate) + + def _maybe_require_matching_dtypes( + self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike] + ) -> None: + # Overridden by AsOfMerge + pass + + def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None: + # Overridden by AsOfMerge + pass + + @final + def _reindex_and_concat( + self, + join_index: Index, + left_indexer: npt.NDArray[np.intp] | None, + right_indexer: npt.NDArray[np.intp] | None, + copy: bool | None, + ) -> DataFrame: + """ + reindex along index and concat along columns. + """ + # Take views so we do not alter the originals + left = self.left[:] + right = self.right[:] + + llabels, rlabels = _items_overlap_with_suffix( + self.left._info_axis, self.right._info_axis, self.suffixes + ) + + if left_indexer is not None and not is_range_indexer(left_indexer, len(left)): + # Pinning the index here (and in the right code just below) is not + # necessary, but makes the `.take` more performant if we have e.g. + # a MultiIndex for left.index. + lmgr = left._mgr.reindex_indexer( + join_index, + left_indexer, + axis=1, + copy=False, + only_slice=True, + allow_dups=True, + use_na_proxy=True, + ) + left = left._constructor_from_mgr(lmgr, axes=lmgr.axes) + left.index = join_index + + if right_indexer is not None and not is_range_indexer( + right_indexer, len(right) + ): + rmgr = right._mgr.reindex_indexer( + join_index, + right_indexer, + axis=1, + copy=False, + only_slice=True, + allow_dups=True, + use_na_proxy=True, + ) + right = right._constructor_from_mgr(rmgr, axes=rmgr.axes) + right.index = join_index + + from pandas import concat + + left.columns = llabels + right.columns = rlabels + result = concat([left, right], axis=1, copy=copy) + return result + + def get_result(self, copy: bool | None = True) -> DataFrame: + if self.indicator: + self.left, self.right = self._indicator_pre_merge(self.left, self.right) + + join_index, left_indexer, right_indexer = self._get_join_info() + + result = self._reindex_and_concat( + join_index, left_indexer, right_indexer, copy=copy + ) + result = result.__finalize__(self, method=self._merge_type) + + if self.indicator: + result = self._indicator_post_merge(result) + + self._maybe_add_join_keys(result, left_indexer, right_indexer) + + self._maybe_restore_index_levels(result) + + return result.__finalize__(self, method="merge") + + @final + @cache_readonly + def _indicator_name(self) -> str | None: + if isinstance(self.indicator, str): + return self.indicator + elif isinstance(self.indicator, bool): + return "_merge" if self.indicator else None + else: + raise ValueError( + "indicator option can only accept boolean or string arguments" + ) + + @final + def _indicator_pre_merge( + self, left: DataFrame, right: DataFrame + ) -> tuple[DataFrame, DataFrame]: + columns = left.columns.union(right.columns) + + for i in ["_left_indicator", "_right_indicator"]: + if i in columns: + raise ValueError( + "Cannot use `indicator=True` option when " + f"data contains a column named {i}" + ) + if self._indicator_name in columns: + raise ValueError( + "Cannot use name of an existing column for indicator column" + ) + + left = left.copy() + right = right.copy() + + left["_left_indicator"] = 1 + left["_left_indicator"] = left["_left_indicator"].astype("int8") + + right["_right_indicator"] = 2 + right["_right_indicator"] = right["_right_indicator"].astype("int8") + + return left, right + + @final + def _indicator_post_merge(self, result: DataFrame) -> DataFrame: + 
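+        # _left_indicator is 1 for rows that came from the left frame and
+        # _right_indicator is 2 for rows from the right, so after filling the
+        # missing side with 0 their sum maps 1 -> left_only, 2 -> right_only
+        # and 3 -> both.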
result["_left_indicator"] = result["_left_indicator"].fillna(0) + result["_right_indicator"] = result["_right_indicator"].fillna(0) + + result[self._indicator_name] = Categorical( + (result["_left_indicator"] + result["_right_indicator"]), + categories=[1, 2, 3], + ) + result[self._indicator_name] = result[ + self._indicator_name + ].cat.rename_categories(["left_only", "right_only", "both"]) + + result = result.drop(labels=["_left_indicator", "_right_indicator"], axis=1) + return result + + @final + def _maybe_restore_index_levels(self, result: DataFrame) -> None: + """ + Restore index levels specified as `on` parameters + + Here we check for cases where `self.left_on` and `self.right_on` pairs + each reference an index level in their respective DataFrames. The + joined columns corresponding to these pairs are then restored to the + index of `result`. + + **Note:** This method has side effects. It modifies `result` in-place + + Parameters + ---------- + result: DataFrame + merge result + + Returns + ------- + None + """ + names_to_restore = [] + for name, left_key, right_key in zip( + self.join_names, self.left_on, self.right_on + ): + if ( + # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible + # type "Union[Hashable, ExtensionArray, Index, Series]"; expected + # "Hashable" + self.orig_left._is_level_reference(left_key) # type: ignore[arg-type] + # Argument 1 to "_is_level_reference" of "NDFrame" has incompatible + # type "Union[Hashable, ExtensionArray, Index, Series]"; expected + # "Hashable" + and self.orig_right._is_level_reference( + right_key # type: ignore[arg-type] + ) + and left_key == right_key + and name not in result.index.names + ): + names_to_restore.append(name) + + if names_to_restore: + result.set_index(names_to_restore, inplace=True) + + @final + def _maybe_add_join_keys( + self, + result: DataFrame, + left_indexer: npt.NDArray[np.intp] | None, + right_indexer: npt.NDArray[np.intp] | None, + ) -> None: + left_has_missing = None + right_has_missing = None + + assert all(isinstance(x, _known) for x in self.left_join_keys) + + keys = zip(self.join_names, self.left_on, self.right_on) + for i, (name, lname, rname) in enumerate(keys): + if not _should_fill(lname, rname): + continue + + take_left, take_right = None, None + + if name in result: + if left_indexer is not None or right_indexer is not None: + if name in self.left: + if left_has_missing is None: + left_has_missing = ( + False + if left_indexer is None + else (left_indexer == -1).any() + ) + + if left_has_missing: + take_right = self.right_join_keys[i] + + if result[name].dtype != self.left[name].dtype: + take_left = self.left[name]._values + + elif name in self.right: + if right_has_missing is None: + right_has_missing = ( + False + if right_indexer is None + else (right_indexer == -1).any() + ) + + if right_has_missing: + take_left = self.left_join_keys[i] + + if result[name].dtype != self.right[name].dtype: + take_right = self.right[name]._values + + else: + take_left = self.left_join_keys[i] + take_right = self.right_join_keys[i] + + if take_left is not None or take_right is not None: + if take_left is None: + lvals = result[name]._values + elif left_indexer is None: + lvals = take_left + else: + # TODO: can we pin down take_left's type earlier? 
+ take_left = extract_array(take_left, extract_numpy=True) + lfill = na_value_for_dtype(take_left.dtype) + lvals = algos.take_nd(take_left, left_indexer, fill_value=lfill) + + if take_right is None: + rvals = result[name]._values + elif right_indexer is None: + rvals = take_right + else: + # TODO: can we pin down take_right's type earlier? + taker = extract_array(take_right, extract_numpy=True) + rfill = na_value_for_dtype(taker.dtype) + rvals = algos.take_nd(taker, right_indexer, fill_value=rfill) + + # if we have an all missing left_indexer + # make sure to just use the right values or vice-versa + if left_indexer is not None and (left_indexer == -1).all(): + key_col = Index(rvals) + result_dtype = rvals.dtype + elif right_indexer is not None and (right_indexer == -1).all(): + key_col = Index(lvals) + result_dtype = lvals.dtype + else: + key_col = Index(lvals) + if left_indexer is not None: + mask_left = left_indexer == -1 + key_col = key_col.where(~mask_left, rvals) + result_dtype = find_common_type([lvals.dtype, rvals.dtype]) + if ( + lvals.dtype.kind == "M" + and rvals.dtype.kind == "M" + and result_dtype.kind == "O" + ): + # TODO(non-nano) Workaround for common_type not dealing + # with different resolutions + result_dtype = key_col.dtype + + if result._is_label_reference(name): + result[name] = result._constructor_sliced( + key_col, dtype=result_dtype, index=result.index + ) + elif result._is_level_reference(name): + if isinstance(result.index, MultiIndex): + key_col.name = name + idx_list = [ + result.index.get_level_values(level_name) + if level_name != name + else key_col + for level_name in result.index.names + ] + + result.set_index(idx_list, inplace=True) + else: + result.index = Index(key_col, name=name) + else: + result.insert(i, name or f"key_{i}", key_col) + + def _get_join_indexers( + self, + ) -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + """return the join indexers""" + # make mypy happy + assert self.how != "asof" + return get_join_indexers( + self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how + ) + + @final + def _get_join_info( + self, + ) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + left_ax = self.left.index + right_ax = self.right.index + + if self.left_index and self.right_index and self.how != "asof": + join_index, left_indexer, right_indexer = left_ax.join( + right_ax, how=self.how, return_indexers=True, sort=self.sort + ) + + elif self.right_index and self.how == "left": + join_index, left_indexer, right_indexer = _left_join_on_index( + left_ax, right_ax, self.left_join_keys, sort=self.sort + ) + + elif self.left_index and self.how == "right": + join_index, right_indexer, left_indexer = _left_join_on_index( + right_ax, left_ax, self.right_join_keys, sort=self.sort + ) + else: + (left_indexer, right_indexer) = self._get_join_indexers() + + if self.right_index: + if len(self.left) > 0: + join_index = self._create_join_index( + left_ax, + right_ax, + left_indexer, + how="right", + ) + elif right_indexer is None: + join_index = right_ax.copy() + else: + join_index = right_ax.take(right_indexer) + elif self.left_index: + if self.how == "asof": + # GH#33463 asof should always behave like a left merge + join_index = self._create_join_index( + left_ax, + right_ax, + left_indexer, + how="left", + ) + + elif len(self.right) > 0: + join_index = self._create_join_index( + right_ax, + left_ax, + right_indexer, + how="left", + ) + elif left_indexer is None: + join_index = left_ax.copy() + else: + 
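+                    # left_indexer reorders (and possibly repeats) rows of the
+                    # left index to line up with the join result, so the joined
+                    # index is materialised with take.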
join_index = left_ax.take(left_indexer) + else: + n = len(left_ax) if left_indexer is None else len(left_indexer) + join_index = default_index(n) + + return join_index, left_indexer, right_indexer + + @final + def _create_join_index( + self, + index: Index, + other_index: Index, + indexer: npt.NDArray[np.intp] | None, + how: JoinHow = "left", + ) -> Index: + """ + Create a join index by rearranging one index to match another + + Parameters + ---------- + index : Index + index being rearranged + other_index : Index + used to supply values not found in index + indexer : np.ndarray[np.intp] or None + how to rearrange index + how : str + Replacement is only necessary if indexer based on other_index. + + Returns + ------- + Index + """ + if self.how in (how, "outer") and not isinstance(other_index, MultiIndex): + # if final index requires values in other_index but not target + # index, indexer may hold missing (-1) values, causing Index.take + # to take the final value in target index. So, we set the last + # element to be the desired fill value. We do not use allow_fill + # and fill_value because it throws a ValueError on integer indices + mask = indexer == -1 + if np.any(mask): + fill_value = na_value_for_dtype(index.dtype, compat=False) + index = index.append(Index([fill_value])) + if indexer is None: + return index.copy() + return index.take(indexer) + + @final + def _get_merge_keys( + self, + ) -> tuple[ + list[ArrayLike], + list[ArrayLike], + list[Hashable], + list[Hashable], + list[Hashable], + ]: + """ + Returns + ------- + left_keys, right_keys, join_names, left_drop, right_drop + """ + left_keys: list[ArrayLike] = [] + right_keys: list[ArrayLike] = [] + join_names: list[Hashable] = [] + right_drop: list[Hashable] = [] + left_drop: list[Hashable] = [] + + left, right = self.left, self.right + + is_lkey = lambda x: isinstance(x, _known) and len(x) == len(left) + is_rkey = lambda x: isinstance(x, _known) and len(x) == len(right) + + # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A + # user could, for example, request 'left_index' and 'left_by'. In a + # regular pd.merge(), users cannot specify both 'left_index' and + # 'left_on'. (Instead, users have a MultiIndex). That means the + # self.left_on in this function is always empty in a pd.merge(), but + # a pd.merge_asof(left_index=True, left_by=...) will result in a + # self.left_on array with a None in the middle of it. This requires + # a work-around as designated in the code below. + # See _validate_left_right_on() for where this happens. + + # ugh, spaghetti re #733 + if _any(self.left_on) and _any(self.right_on): + for lk, rk in zip(self.left_on, self.right_on): + lk = extract_array(lk, extract_numpy=True) + rk = extract_array(rk, extract_numpy=True) + if is_lkey(lk): + lk = cast(ArrayLike, lk) + left_keys.append(lk) + if is_rkey(rk): + rk = cast(ArrayLike, rk) + right_keys.append(rk) + join_names.append(None) # what to do? 
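+                    # rk is an array-like the length of the right frame, so it
+                    # is used as the key directly and there is no column label
+                    # to record in join_names.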
+ else: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + rk = cast(Hashable, rk) + if rk is not None: + right_keys.append(right._get_label_or_level_values(rk)) + join_names.append(rk) + else: + # work-around for merge_asof(right_index=True) + right_keys.append(right.index._values) + join_names.append(right.index.name) + else: + if not is_rkey(rk): + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + rk = cast(Hashable, rk) + if rk is not None: + right_keys.append(right._get_label_or_level_values(rk)) + else: + # work-around for merge_asof(right_index=True) + right_keys.append(right.index._values) + if lk is not None and lk == rk: # FIXME: what about other NAs? + right_drop.append(rk) + else: + rk = cast(ArrayLike, rk) + right_keys.append(rk) + if lk is not None: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + lk = cast(Hashable, lk) + left_keys.append(left._get_label_or_level_values(lk)) + join_names.append(lk) + else: + # work-around for merge_asof(left_index=True) + left_keys.append(left.index._values) + join_names.append(left.index.name) + elif _any(self.left_on): + for k in self.left_on: + if is_lkey(k): + k = extract_array(k, extract_numpy=True) + k = cast(ArrayLike, k) + left_keys.append(k) + join_names.append(None) + else: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + k = cast(Hashable, k) + left_keys.append(left._get_label_or_level_values(k)) + join_names.append(k) + if isinstance(self.right.index, MultiIndex): + right_keys = [ + lev._values.take(lev_codes) + for lev, lev_codes in zip( + self.right.index.levels, self.right.index.codes + ) + ] + else: + right_keys = [self.right.index._values] + elif _any(self.right_on): + for k in self.right_on: + k = extract_array(k, extract_numpy=True) + if is_rkey(k): + k = cast(ArrayLike, k) + right_keys.append(k) + join_names.append(None) + else: + # Then we're either Hashable or a wrong-length arraylike, + # the latter of which will raise + k = cast(Hashable, k) + right_keys.append(right._get_label_or_level_values(k)) + join_names.append(k) + if isinstance(self.left.index, MultiIndex): + left_keys = [ + lev._values.take(lev_codes) + for lev, lev_codes in zip( + self.left.index.levels, self.left.index.codes + ) + ] + else: + left_keys = [self.left.index._values] + + return left_keys, right_keys, join_names, left_drop, right_drop + + @final + def _maybe_coerce_merge_keys(self) -> None: + # we have valid merges but we may have to further + # coerce these if they are originally incompatible types + # + # for example if these are categorical, but are not dtype_equal + # or if we have object and integer dtypes + + for lk, rk, name in zip( + self.left_join_keys, self.right_join_keys, self.join_names + ): + if (len(lk) and not len(rk)) or (not len(lk) and len(rk)): + continue + + lk = extract_array(lk, extract_numpy=True) + rk = extract_array(rk, extract_numpy=True) + + lk_is_cat = isinstance(lk.dtype, CategoricalDtype) + rk_is_cat = isinstance(rk.dtype, CategoricalDtype) + lk_is_object_or_string = is_object_dtype(lk.dtype) or is_string_dtype( + lk.dtype + ) + rk_is_object_or_string = is_object_dtype(rk.dtype) or is_string_dtype( + rk.dtype + ) + + # if either left or right is a categorical + # then the must match exactly in categories & ordered + if lk_is_cat and rk_is_cat: + lk = cast(Categorical, lk) + rk = cast(Categorical, rk) + if 
lk._categories_match_up_to_permutation(rk): + continue + + elif lk_is_cat or rk_is_cat: + pass + + elif lk.dtype == rk.dtype: + continue + + msg = ( + f"You are trying to merge on {lk.dtype} and {rk.dtype} columns " + f"for key '{name}'. If you wish to proceed you should use pd.concat" + ) + + # if we are numeric, then allow differing + # kinds to proceed, eg. int64 and int8, int and float + # further if we are object, but we infer to + # the same, then proceed + if is_numeric_dtype(lk.dtype) and is_numeric_dtype(rk.dtype): + if lk.dtype.kind == rk.dtype.kind: + continue + + if isinstance(lk.dtype, ExtensionDtype) and not isinstance( + rk.dtype, ExtensionDtype + ): + ct = find_common_type([lk.dtype, rk.dtype]) + if isinstance(ct, ExtensionDtype): + com_cls = ct.construct_array_type() + rk = com_cls._from_sequence(rk, dtype=ct, copy=False) + else: + rk = rk.astype(ct) + elif isinstance(rk.dtype, ExtensionDtype): + ct = find_common_type([lk.dtype, rk.dtype]) + if isinstance(ct, ExtensionDtype): + com_cls = ct.construct_array_type() + lk = com_cls._from_sequence(lk, dtype=ct, copy=False) + else: + lk = lk.astype(ct) + + # check whether ints and floats + if is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype): + # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int + with np.errstate(invalid="ignore"): + # error: Argument 1 to "astype" of "ndarray" has incompatible + # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected + # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]" + casted = lk.astype(rk.dtype) # type: ignore[arg-type] + + mask = ~np.isnan(lk) + match = lk == casted + if not match[mask].all(): + warnings.warn( + "You are merging on int and float " + "columns where the float values " + "are not equal to their int representation.", + UserWarning, + stacklevel=find_stack_level(), + ) + continue + + if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype): + # GH 47391 numpy > 1.24 will raise a RuntimeError for nan -> int + with np.errstate(invalid="ignore"): + # error: Argument 1 to "astype" of "ndarray" has incompatible + # type "Union[ExtensionDtype, Any, dtype[Any]]"; expected + # "Union[dtype[Any], Type[Any], _SupportsDType[dtype[Any]]]" + casted = rk.astype(lk.dtype) # type: ignore[arg-type] + + mask = ~np.isnan(rk) + match = rk == casted + if not match[mask].all(): + warnings.warn( + "You are merging on int and float " + "columns where the float values " + "are not equal to their int representation.", + UserWarning, + stacklevel=find_stack_level(), + ) + continue + + # let's infer and see if we are ok + if lib.infer_dtype(lk, skipna=False) == lib.infer_dtype( + rk, skipna=False + ): + continue + + # Check if we are trying to merge on obviously + # incompatible dtypes GH 9780, GH 15800 + + # bool values are coerced to object + elif (lk_is_object_or_string and is_bool_dtype(rk.dtype)) or ( + is_bool_dtype(lk.dtype) and rk_is_object_or_string + ): + pass + + # object values are allowed to be merged + elif (lk_is_object_or_string and is_numeric_dtype(rk.dtype)) or ( + is_numeric_dtype(lk.dtype) and rk_is_object_or_string + ): + inferred_left = lib.infer_dtype(lk, skipna=False) + inferred_right = lib.infer_dtype(rk, skipna=False) + bool_types = ["integer", "mixed-integer", "boolean", "empty"] + string_types = ["string", "unicode", "mixed", "bytes", "empty"] + + # inferred bool + if inferred_left in bool_types and inferred_right in bool_types: + pass + + # unless we are merging non-string-like with string-like + elif ( + inferred_left in string_types and 
inferred_right not in string_types + ) or ( + inferred_right in string_types and inferred_left not in string_types + ): + raise ValueError(msg) + + # datetimelikes must match exactly + elif needs_i8_conversion(lk.dtype) and not needs_i8_conversion(rk.dtype): + raise ValueError(msg) + elif not needs_i8_conversion(lk.dtype) and needs_i8_conversion(rk.dtype): + raise ValueError(msg) + elif isinstance(lk.dtype, DatetimeTZDtype) and not isinstance( + rk.dtype, DatetimeTZDtype + ): + raise ValueError(msg) + elif not isinstance(lk.dtype, DatetimeTZDtype) and isinstance( + rk.dtype, DatetimeTZDtype + ): + raise ValueError(msg) + elif ( + isinstance(lk.dtype, DatetimeTZDtype) + and isinstance(rk.dtype, DatetimeTZDtype) + ) or (lk.dtype.kind == "M" and rk.dtype.kind == "M"): + # allows datetime with different resolutions + continue + # datetime and timedelta not allowed + elif lk.dtype.kind == "M" and rk.dtype.kind == "m": + raise ValueError(msg) + elif lk.dtype.kind == "m" and rk.dtype.kind == "M": + raise ValueError(msg) + + elif is_object_dtype(lk.dtype) and is_object_dtype(rk.dtype): + continue + + # Houston, we have a problem! + # let's coerce to object if the dtypes aren't + # categorical, otherwise coerce to the category + # dtype. If we coerced categories to object, + # then we would lose type information on some + # columns, and end up trying to merge + # incompatible dtypes. See GH 16900. + if name in self.left.columns: + typ = cast(Categorical, lk).categories.dtype if lk_is_cat else object + self.left = self.left.copy() + self.left[name] = self.left[name].astype(typ) + if name in self.right.columns: + typ = cast(Categorical, rk).categories.dtype if rk_is_cat else object + self.right = self.right.copy() + self.right[name] = self.right[name].astype(typ) + + def _validate_left_right_on(self, left_on, right_on): + left_on = com.maybe_make_list(left_on) + right_on = com.maybe_make_list(right_on) + + # Hm, any way to make this logic less complicated?? + if self.on is None and left_on is None and right_on is None: + if self.left_index and self.right_index: + left_on, right_on = (), () + elif self.left_index: + raise MergeError("Must pass right_on or right_index=True") + elif self.right_index: + raise MergeError("Must pass left_on or left_index=True") + else: + # use the common columns + left_cols = self.left.columns + right_cols = self.right.columns + common_cols = left_cols.intersection(right_cols) + if len(common_cols) == 0: + raise MergeError( + "No common columns to perform merge on. " + f"Merge options: left_on={left_on}, " + f"right_on={right_on}, " + f"left_index={self.left_index}, " + f"right_index={self.right_index}" + ) + if ( + not left_cols.join(common_cols, how="inner").is_unique + or not right_cols.join(common_cols, how="inner").is_unique + ): + raise MergeError(f"Data columns not unique: {repr(common_cols)}") + left_on = right_on = common_cols + elif self.on is not None: + if left_on is not None or right_on is not None: + raise MergeError( + 'Can only pass argument "on" OR "left_on" ' + 'and "right_on", not a combination of both.' + ) + if self.left_index or self.right_index: + raise MergeError( + 'Can only pass argument "on" OR "left_index" ' + 'and "right_index", not a combination of both.' + ) + left_on = right_on = self.on + elif left_on is not None: + if self.left_index: + raise MergeError( + 'Can only pass argument "left_on" OR "left_index" not both.' 
+ ) + if not self.right_index and right_on is None: + raise MergeError('Must pass "right_on" OR "right_index".') + n = len(left_on) + if self.right_index: + if len(left_on) != self.right.index.nlevels: + raise ValueError( + "len(left_on) must equal the number " + 'of levels in the index of "right"' + ) + right_on = [None] * n + elif right_on is not None: + if self.right_index: + raise MergeError( + 'Can only pass argument "right_on" OR "right_index" not both.' + ) + if not self.left_index and left_on is None: + raise MergeError('Must pass "left_on" OR "left_index".') + n = len(right_on) + if self.left_index: + if len(right_on) != self.left.index.nlevels: + raise ValueError( + "len(right_on) must equal the number " + 'of levels in the index of "left"' + ) + left_on = [None] * n + if len(right_on) != len(left_on): + raise ValueError("len(right_on) must equal len(left_on)") + + return left_on, right_on + + @final + def _validate_validate_kwd(self, validate: str) -> None: + # Check uniqueness of each + if self.left_index: + left_unique = self.orig_left.index.is_unique + else: + left_unique = MultiIndex.from_arrays(self.left_join_keys).is_unique + + if self.right_index: + right_unique = self.orig_right.index.is_unique + else: + right_unique = MultiIndex.from_arrays(self.right_join_keys).is_unique + + # Check data integrity + if validate in ["one_to_one", "1:1"]: + if not left_unique and not right_unique: + raise MergeError( + "Merge keys are not unique in either left " + "or right dataset; not a one-to-one merge" + ) + if not left_unique: + raise MergeError( + "Merge keys are not unique in left dataset; not a one-to-one merge" + ) + if not right_unique: + raise MergeError( + "Merge keys are not unique in right dataset; not a one-to-one merge" + ) + + elif validate in ["one_to_many", "1:m"]: + if not left_unique: + raise MergeError( + "Merge keys are not unique in left dataset; not a one-to-many merge" + ) + + elif validate in ["many_to_one", "m:1"]: + if not right_unique: + raise MergeError( + "Merge keys are not unique in right dataset; " + "not a many-to-one merge" + ) + + elif validate in ["many_to_many", "m:m"]: + pass + + else: + raise ValueError( + f'"{validate}" is not a valid argument. ' + "Valid arguments are:\n" + '- "1:1"\n' + '- "1:m"\n' + '- "m:1"\n' + '- "m:m"\n' + '- "one_to_one"\n' + '- "one_to_many"\n' + '- "many_to_one"\n' + '- "many_to_many"' + ) + + +def get_join_indexers( + left_keys: list[ArrayLike], + right_keys: list[ArrayLike], + sort: bool = False, + how: JoinHow = "inner", +) -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: + """ + + Parameters + ---------- + left_keys : list[ndarray, ExtensionArray, Index, Series] + right_keys : list[ndarray, ExtensionArray, Index, Series] + sort : bool, default False + how : {'inner', 'outer', 'left', 'right'}, default 'inner' + + Returns + ------- + np.ndarray[np.intp] or None + Indexer into the left_keys. + np.ndarray[np.intp] or None + Indexer into the right_keys. 
+ """ + assert len(left_keys) == len( + right_keys + ), "left_keys and right_keys must be the same length" + + # fast-path for empty left/right + left_n = len(left_keys[0]) + right_n = len(right_keys[0]) + if left_n == 0: + if how in ["left", "inner"]: + return _get_empty_indexer() + elif not sort and how in ["right", "outer"]: + return _get_no_sort_one_missing_indexer(right_n, True) + elif right_n == 0: + if how in ["right", "inner"]: + return _get_empty_indexer() + elif not sort and how in ["left", "outer"]: + return _get_no_sort_one_missing_indexer(left_n, False) + + lkey: ArrayLike + rkey: ArrayLike + if len(left_keys) > 1: + # get left & right join labels and num. of levels at each location + mapped = ( + _factorize_keys(left_keys[n], right_keys[n], sort=sort) + for n in range(len(left_keys)) + ) + zipped = zip(*mapped) + llab, rlab, shape = (list(x) for x in zipped) + + # get flat i8 keys from label lists + lkey, rkey = _get_join_keys(llab, rlab, tuple(shape), sort) + else: + lkey = left_keys[0] + rkey = right_keys[0] + + left = Index(lkey) + right = Index(rkey) + + if ( + left.is_monotonic_increasing + and right.is_monotonic_increasing + and (left.is_unique or right.is_unique) + ): + _, lidx, ridx = left.join(right, how=how, return_indexers=True, sort=sort) + else: + lidx, ridx = get_join_indexers_non_unique( + left._values, right._values, sort, how + ) + + if lidx is not None and is_range_indexer(lidx, len(left)): + lidx = None + if ridx is not None and is_range_indexer(ridx, len(right)): + ridx = None + return lidx, ridx + + +def get_join_indexers_non_unique( + left: ArrayLike, + right: ArrayLike, + sort: bool = False, + how: JoinHow = "inner", +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """ + Get join indexers for left and right. + + Parameters + ---------- + left : ArrayLike + right : ArrayLike + sort : bool, default False + how : {'inner', 'outer', 'left', 'right'}, default 'inner' + + Returns + ------- + np.ndarray[np.intp] + Indexer into left. + np.ndarray[np.intp] + Indexer into right. + """ + lkey, rkey, count = _factorize_keys(left, right, sort=sort) + if how == "left": + lidx, ridx = libjoin.left_outer_join(lkey, rkey, count, sort=sort) + elif how == "right": + ridx, lidx = libjoin.left_outer_join(rkey, lkey, count, sort=sort) + elif how == "inner": + lidx, ridx = libjoin.inner_join(lkey, rkey, count, sort=sort) + elif how == "outer": + lidx, ridx = libjoin.full_outer_join(lkey, rkey, count) + return lidx, ridx + + +def restore_dropped_levels_multijoin( + left: MultiIndex, + right: MultiIndex, + dropped_level_names, + join_index: Index, + lindexer: npt.NDArray[np.intp], + rindexer: npt.NDArray[np.intp], +) -> tuple[FrozenList, FrozenList, FrozenList]: + """ + *this is an internal non-public method* + + Returns the levels, labels and names of a multi-index to multi-index join. + Depending on the type of join, this method restores the appropriate + dropped levels of the joined multi-index. 
+ The method relies on lindexer, rindexer which hold the index positions of + left and right, where a join was feasible + + Parameters + ---------- + left : MultiIndex + left index + right : MultiIndex + right index + dropped_level_names : str array + list of non-common level names + join_index : Index + the index of the join between the + common levels of left and right + lindexer : np.ndarray[np.intp] + left indexer + rindexer : np.ndarray[np.intp] + right indexer + + Returns + ------- + levels : list of Index + levels of combined multiindexes + labels : np.ndarray[np.intp] + labels of combined multiindexes + names : List[Hashable] + names of combined multiindex levels + + """ + + def _convert_to_multiindex(index: Index) -> MultiIndex: + if isinstance(index, MultiIndex): + return index + else: + return MultiIndex.from_arrays([index._values], names=[index.name]) + + # For multi-multi joins with one overlapping level, + # the returned index if of type Index + # Assure that join_index is of type MultiIndex + # so that dropped levels can be appended + join_index = _convert_to_multiindex(join_index) + + join_levels = join_index.levels + join_codes = join_index.codes + join_names = join_index.names + + # Iterate through the levels that must be restored + for dropped_level_name in dropped_level_names: + if dropped_level_name in left.names: + idx = left + indexer = lindexer + else: + idx = right + indexer = rindexer + + # The index of the level name to be restored + name_idx = idx.names.index(dropped_level_name) + + restore_levels = idx.levels[name_idx] + # Inject -1 in the codes list where a join was not possible + # IOW indexer[i]=-1 + codes = idx.codes[name_idx] + if indexer is None: + restore_codes = codes + else: + restore_codes = algos.take_nd(codes, indexer, fill_value=-1) + + # error: Cannot determine type of "__add__" + join_levels = join_levels + [restore_levels] # type: ignore[has-type] + join_codes = join_codes + [restore_codes] # type: ignore[has-type] + join_names = join_names + [dropped_level_name] + + return join_levels, join_codes, join_names + + +class _OrderedMerge(_MergeOperation): + _merge_type = "ordered_merge" + + def __init__( + self, + left: DataFrame | Series, + right: DataFrame | Series, + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + suffixes: Suffixes = ("_x", "_y"), + fill_method: str | None = None, + how: JoinHow | Literal["asof"] = "outer", + ) -> None: + self.fill_method = fill_method + _MergeOperation.__init__( + self, + left, + right, + on=on, + left_on=left_on, + left_index=left_index, + right_index=right_index, + right_on=right_on, + how=how, + suffixes=suffixes, + sort=True, # factorize sorts + ) + + def get_result(self, copy: bool | None = True) -> DataFrame: + join_index, left_indexer, right_indexer = self._get_join_info() + + left_join_indexer: npt.NDArray[np.intp] | None + right_join_indexer: npt.NDArray[np.intp] | None + + if self.fill_method == "ffill": + if left_indexer is None: + left_join_indexer = None + else: + left_join_indexer = libjoin.ffill_indexer(left_indexer) + if right_indexer is None: + right_join_indexer = None + else: + right_join_indexer = libjoin.ffill_indexer(right_indexer) + elif self.fill_method is None: + left_join_indexer = left_indexer + right_join_indexer = right_indexer + else: + raise ValueError("fill_method must be 'ffill' or None") + + result = self._reindex_and_concat( + join_index, left_join_indexer, 
right_join_indexer, copy=copy + ) + self._maybe_add_join_keys(result, left_indexer, right_indexer) + + return result + + +def _asof_by_function(direction: str): + name = f"asof_join_{direction}_on_X_by_Y" + return getattr(libjoin, name, None) + + +class _AsOfMerge(_OrderedMerge): + _merge_type = "asof_merge" + + def __init__( + self, + left: DataFrame | Series, + right: DataFrame | Series, + on: IndexLabel | None = None, + left_on: IndexLabel | None = None, + right_on: IndexLabel | None = None, + left_index: bool = False, + right_index: bool = False, + by=None, + left_by=None, + right_by=None, + suffixes: Suffixes = ("_x", "_y"), + how: Literal["asof"] = "asof", + tolerance=None, + allow_exact_matches: bool = True, + direction: str = "backward", + ) -> None: + self.by = by + self.left_by = left_by + self.right_by = right_by + self.tolerance = tolerance + self.allow_exact_matches = allow_exact_matches + self.direction = direction + + # check 'direction' is valid + if self.direction not in ["backward", "forward", "nearest"]: + raise MergeError(f"direction invalid: {self.direction}") + + # validate allow_exact_matches + if not is_bool(self.allow_exact_matches): + msg = ( + "allow_exact_matches must be boolean, " + f"passed {self.allow_exact_matches}" + ) + raise MergeError(msg) + + _OrderedMerge.__init__( + self, + left, + right, + on=on, + left_on=left_on, + right_on=right_on, + left_index=left_index, + right_index=right_index, + how=how, + suffixes=suffixes, + fill_method=None, + ) + + def _validate_left_right_on(self, left_on, right_on): + left_on, right_on = super()._validate_left_right_on(left_on, right_on) + + # we only allow on to be a single item for on + if len(left_on) != 1 and not self.left_index: + raise MergeError("can only asof on a key for left") + + if len(right_on) != 1 and not self.right_index: + raise MergeError("can only asof on a key for right") + + if self.left_index and isinstance(self.left.index, MultiIndex): + raise MergeError("left can only have one index") + + if self.right_index and isinstance(self.right.index, MultiIndex): + raise MergeError("right can only have one index") + + # set 'by' columns + if self.by is not None: + if self.left_by is not None or self.right_by is not None: + raise MergeError("Can only pass by OR left_by and right_by") + self.left_by = self.right_by = self.by + if self.left_by is None and self.right_by is not None: + raise MergeError("missing left_by") + if self.left_by is not None and self.right_by is None: + raise MergeError("missing right_by") + + # GH#29130 Check that merge keys do not have dtype object + if not self.left_index: + left_on_0 = left_on[0] + if isinstance(left_on_0, _known): + lo_dtype = left_on_0.dtype + else: + lo_dtype = ( + self.left._get_label_or_level_values(left_on_0).dtype + if left_on_0 in self.left.columns + else self.left.index.get_level_values(left_on_0) + ) + else: + lo_dtype = self.left.index.dtype + + if not self.right_index: + right_on_0 = right_on[0] + if isinstance(right_on_0, _known): + ro_dtype = right_on_0.dtype + else: + ro_dtype = ( + self.right._get_label_or_level_values(right_on_0).dtype + if right_on_0 in self.right.columns + else self.right.index.get_level_values(right_on_0) + ) + else: + ro_dtype = self.right.index.dtype + + if ( + is_object_dtype(lo_dtype) + or is_object_dtype(ro_dtype) + or is_string_dtype(lo_dtype) + or is_string_dtype(ro_dtype) + ): + raise MergeError( + f"Incompatible merge dtype, {repr(ro_dtype)} and " + f"{repr(lo_dtype)}, both sides must have numeric dtype" + ) + + # add 
'by' to our key-list so we can have it in the + # output as a key + if self.left_by is not None: + if not is_list_like(self.left_by): + self.left_by = [self.left_by] + if not is_list_like(self.right_by): + self.right_by = [self.right_by] + + if len(self.left_by) != len(self.right_by): + raise MergeError("left_by and right_by must be the same length") + + left_on = self.left_by + list(left_on) + right_on = self.right_by + list(right_on) + + return left_on, right_on + + def _maybe_require_matching_dtypes( + self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike] + ) -> None: + # TODO: why do we do this for AsOfMerge but not the others? + + def _check_dtype_match(left: ArrayLike, right: ArrayLike, i: int): + if left.dtype != right.dtype: + if isinstance(left.dtype, CategoricalDtype) and isinstance( + right.dtype, CategoricalDtype + ): + # The generic error message is confusing for categoricals. + # + # In this function, the join keys include both the original + # ones of the merge_asof() call, and also the keys passed + # to its by= argument. Unordered but equal categories + # are not supported for the former, but will fail + # later with a ValueError, so we don't *need* to check + # for them here. + msg = ( + f"incompatible merge keys [{i}] {repr(left.dtype)} and " + f"{repr(right.dtype)}, both sides category, but not equal ones" + ) + else: + msg = ( + f"incompatible merge keys [{i}] {repr(left.dtype)} and " + f"{repr(right.dtype)}, must be the same type" + ) + raise MergeError(msg) + + # validate index types are the same + for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)): + _check_dtype_match(lk, rk, i) + + if self.left_index: + lt = self.left.index._values + else: + lt = left_join_keys[-1] + + if self.right_index: + rt = self.right.index._values + else: + rt = right_join_keys[-1] + + _check_dtype_match(lt, rt, 0) + + def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None: + # validate tolerance; datetime.timedelta or Timedelta if we have a DTI + if self.tolerance is not None: + if self.left_index: + lt = self.left.index._values + else: + lt = left_join_keys[-1] + + msg = ( + f"incompatible tolerance {self.tolerance}, must be compat " + f"with type {repr(lt.dtype)}" + ) + + if needs_i8_conversion(lt.dtype) or ( + isinstance(lt, ArrowExtensionArray) and lt.dtype.kind in "mM" + ): + if not isinstance(self.tolerance, datetime.timedelta): + raise MergeError(msg) + if self.tolerance < Timedelta(0): + raise MergeError("tolerance must be positive") + + elif is_integer_dtype(lt.dtype): + if not is_integer(self.tolerance): + raise MergeError(msg) + if self.tolerance < 0: + raise MergeError("tolerance must be positive") + + elif is_float_dtype(lt.dtype): + if not is_number(self.tolerance): + raise MergeError(msg) + # error: Unsupported operand types for > ("int" and "Number") + if self.tolerance < 0: # type: ignore[operator] + raise MergeError("tolerance must be positive") + + else: + raise MergeError("key must be integer, timestamp or float") + + def _convert_values_for_libjoin( + self, values: AnyArrayLike, side: str + ) -> np.ndarray: + # we require sortedness and non-null values in the join keys + if not Index(values).is_monotonic_increasing: + if isna(values).any(): + raise ValueError(f"Merge keys contain null values on {side} side") + raise ValueError(f"{side} keys must be sorted") + + if isinstance(values, ArrowExtensionArray): + values = values._maybe_convert_datelike_array() + + if needs_i8_conversion(values.dtype): + values = 
values.view("i8") + + elif isinstance(values, BaseMaskedArray): + # we've verified above that no nulls exist + values = values._data + elif isinstance(values, ExtensionArray): + values = values.to_numpy() + + # error: Incompatible return value type (got "Union[ExtensionArray, + # Any, ndarray[Any, Any], ndarray[Any, dtype[Any]], Index, Series]", + # expected "ndarray[Any, Any]") + return values # type: ignore[return-value] + + def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """return the join indexers""" + + # values to compare + left_values = ( + self.left.index._values if self.left_index else self.left_join_keys[-1] + ) + right_values = ( + self.right.index._values if self.right_index else self.right_join_keys[-1] + ) + + # _maybe_require_matching_dtypes already checked for dtype matching + assert left_values.dtype == right_values.dtype + + tolerance = self.tolerance + if tolerance is not None: + # TODO: can we reuse a tolerance-conversion function from + # e.g. TimedeltaIndex? + if needs_i8_conversion(left_values.dtype) or ( + isinstance(left_values, ArrowExtensionArray) + and left_values.dtype.kind in "mM" + ): + tolerance = Timedelta(tolerance) + # TODO: we have no test cases with PeriodDtype here; probably + # need to adjust tolerance for that case. + if left_values.dtype.kind in "mM": + # Make sure the i8 representation for tolerance + # matches that for left_values/right_values. + if isinstance(left_values, ArrowExtensionArray): + unit = left_values.dtype.pyarrow_dtype.unit + else: + unit = ensure_wrapped_if_datetimelike(left_values).unit + tolerance = tolerance.as_unit(unit) + + tolerance = tolerance._value + + # initial type conversion as needed + left_values = self._convert_values_for_libjoin(left_values, "left") + right_values = self._convert_values_for_libjoin(right_values, "right") + + # a "by" parameter requires special handling + if self.left_by is not None: + # remove 'on' parameter from values if one existed + if self.left_index and self.right_index: + left_join_keys = self.left_join_keys + right_join_keys = self.right_join_keys + else: + left_join_keys = self.left_join_keys[0:-1] + right_join_keys = self.right_join_keys[0:-1] + + mapped = [ + _factorize_keys( + left_join_keys[n], + right_join_keys[n], + sort=False, + ) + for n in range(len(left_join_keys)) + ] + + if len(left_join_keys) == 1: + left_by_values = mapped[0][0] + right_by_values = mapped[0][1] + else: + arrs = [np.concatenate(m[:2]) for m in mapped] + shape = tuple(m[2] for m in mapped) + group_index = get_group_index( + arrs, shape=shape, sort=False, xnull=False + ) + left_len = len(left_join_keys[0]) + left_by_values = group_index[:left_len] + right_by_values = group_index[left_len:] + + left_by_values = ensure_int64(left_by_values) + right_by_values = ensure_int64(right_by_values) + + # choose appropriate function by type + func = _asof_by_function(self.direction) + return func( + left_values, + right_values, + left_by_values, + right_by_values, + self.allow_exact_matches, + tolerance, + ) + else: + # choose appropriate function by type + func = _asof_by_function(self.direction) + return func( + left_values, + right_values, + None, + None, + self.allow_exact_matches, + tolerance, + False, + ) + + +def _get_multiindex_indexer( + join_keys: list[ArrayLike], index: MultiIndex, sort: bool +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + # left & right join labels and num. 
of levels at each location + mapped = ( + _factorize_keys(index.levels[n]._values, join_keys[n], sort=sort) + for n in range(index.nlevels) + ) + zipped = zip(*mapped) + rcodes, lcodes, shape = (list(x) for x in zipped) + if sort: + rcodes = list(map(np.take, rcodes, index.codes)) + else: + i8copy = lambda a: a.astype("i8", subok=False, copy=True) + rcodes = list(map(i8copy, index.codes)) + + # fix right labels if there were any nulls + for i, join_key in enumerate(join_keys): + mask = index.codes[i] == -1 + if mask.any(): + # check if there already was any nulls at this location + # if there was, it is factorized to `shape[i] - 1` + a = join_key[lcodes[i] == shape[i] - 1] + if a.size == 0 or not a[0] != a[0]: + shape[i] += 1 + + rcodes[i][mask] = shape[i] - 1 + + # get flat i8 join keys + lkey, rkey = _get_join_keys(lcodes, rcodes, tuple(shape), sort) + return lkey, rkey + + +def _get_empty_indexer() -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """Return empty join indexers.""" + return ( + np.array([], dtype=np.intp), + np.array([], dtype=np.intp), + ) + + +def _get_no_sort_one_missing_indexer( + n: int, left_missing: bool +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + """ + Return join indexers where all of one side is selected without sorting + and none of the other side is selected. + + Parameters + ---------- + n : int + Length of indexers to create. + left_missing : bool + If True, the left indexer will contain only -1's. + If False, the right indexer will contain only -1's. + + Returns + ------- + np.ndarray[np.intp] + Left indexer + np.ndarray[np.intp] + Right indexer + """ + idx = np.arange(n, dtype=np.intp) + idx_missing = np.full(shape=n, fill_value=-1, dtype=np.intp) + if left_missing: + return idx_missing, idx + return idx, idx_missing + + +def _left_join_on_index( + left_ax: Index, right_ax: Index, join_keys: list[ArrayLike], sort: bool = False +) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp]]: + if isinstance(right_ax, MultiIndex): + lkey, rkey = _get_multiindex_indexer(join_keys, right_ax, sort=sort) + else: + # error: Incompatible types in assignment (expression has type + # "Union[Union[ExtensionArray, ndarray[Any, Any]], Index, Series]", + # variable has type "ndarray[Any, dtype[signedinteger[Any]]]") + lkey = join_keys[0] # type: ignore[assignment] + # error: Incompatible types in assignment (expression has type "Index", + # variable has type "ndarray[Any, dtype[signedinteger[Any]]]") + rkey = right_ax._values # type: ignore[assignment] + + left_key, right_key, count = _factorize_keys(lkey, rkey, sort=sort) + left_indexer, right_indexer = libjoin.left_outer_join( + left_key, right_key, count, sort=sort + ) + + if sort or len(left_ax) != len(left_indexer): + # if asked to sort or there are 1-to-many matches + join_index = left_ax.take(left_indexer) + return join_index, left_indexer, right_indexer + + # left frame preserves order & length of its index + return left_ax, None, right_indexer + + +def _factorize_keys( + lk: ArrayLike, rk: ArrayLike, sort: bool = True +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: + """ + Encode left and right keys as enumerated types. + + This is used to get the join indexers to be used when merging DataFrames. + + Parameters + ---------- + lk : ndarray, ExtensionArray + Left key. + rk : ndarray, ExtensionArray + Right key. + sort : bool, defaults to True + If True, the encoding is done such that the unique elements in the + keys are sorted. 
+ + Returns + ------- + np.ndarray[np.intp] + Left (resp. right if called with `key='right'`) labels, as enumerated type. + np.ndarray[np.intp] + Right (resp. left if called with `key='right'`) labels, as enumerated type. + int + Number of unique elements in union of left and right labels. + + See Also + -------- + merge : Merge DataFrame or named Series objects + with a database-style join. + algorithms.factorize : Encode the object as an enumerated type + or categorical variable. + + Examples + -------- + >>> lk = np.array(["a", "c", "b"]) + >>> rk = np.array(["a", "c"]) + + Here, the unique values are `'a', 'b', 'c'`. With the default + `sort=True`, the encoding will be `{0: 'a', 1: 'b', 2: 'c'}`: + + >>> pd.core.reshape.merge._factorize_keys(lk, rk) + (array([0, 2, 1]), array([0, 2]), 3) + + With the `sort=False`, the encoding will correspond to the order + in which the unique elements first appear: `{0: 'a', 1: 'c', 2: 'b'}`: + + >>> pd.core.reshape.merge._factorize_keys(lk, rk, sort=False) + (array([0, 1, 2]), array([0, 1]), 3) + """ + # TODO: if either is a RangeIndex, we can likely factorize more efficiently? + + if ( + isinstance(lk.dtype, DatetimeTZDtype) and isinstance(rk.dtype, DatetimeTZDtype) + ) or (lib.is_np_dtype(lk.dtype, "M") and lib.is_np_dtype(rk.dtype, "M")): + # Extract the ndarray (UTC-localized) values + # Note: we dont need the dtypes to match, as these can still be compared + lk, rk = cast("DatetimeArray", lk)._ensure_matching_resos(rk) + lk = cast("DatetimeArray", lk)._ndarray + rk = cast("DatetimeArray", rk)._ndarray + + elif ( + isinstance(lk.dtype, CategoricalDtype) + and isinstance(rk.dtype, CategoricalDtype) + and lk.dtype == rk.dtype + ): + assert isinstance(lk, Categorical) + assert isinstance(rk, Categorical) + # Cast rk to encoding so we can compare codes with lk + + rk = lk._encode_with_my_categories(rk) + + lk = ensure_int64(lk.codes) + rk = ensure_int64(rk.codes) + + elif isinstance(lk, ExtensionArray) and lk.dtype == rk.dtype: + if (isinstance(lk.dtype, ArrowDtype) and is_string_dtype(lk.dtype)) or ( + isinstance(lk.dtype, StringDtype) + and lk.dtype.storage in ["pyarrow", "pyarrow_numpy"] + ): + import pyarrow as pa + import pyarrow.compute as pc + + len_lk = len(lk) + lk = lk._pa_array # type: ignore[attr-defined] + rk = rk._pa_array # type: ignore[union-attr] + dc = ( + pa.chunked_array(lk.chunks + rk.chunks) # type: ignore[union-attr] + .combine_chunks() + .dictionary_encode() + ) + + llab, rlab, count = ( + pc.fill_null(dc.indices[slice(len_lk)], -1) + .to_numpy() + .astype(np.intp, copy=False), + pc.fill_null(dc.indices[slice(len_lk, None)], -1) + .to_numpy() + .astype(np.intp, copy=False), + len(dc.dictionary), + ) + + if sort: + uniques = dc.dictionary.to_numpy(zero_copy_only=False) + llab, rlab = _sort_labels(uniques, llab, rlab) + + if dc.null_count > 0: + lmask = llab == -1 + lany = lmask.any() + rmask = rlab == -1 + rany = rmask.any() + if lany: + np.putmask(llab, lmask, count) + if rany: + np.putmask(rlab, rmask, count) + count += 1 + return llab, rlab, count + + if not isinstance(lk, BaseMaskedArray) and not ( + # exclude arrow dtypes that would get cast to object + isinstance(lk.dtype, ArrowDtype) + and ( + is_numeric_dtype(lk.dtype.numpy_dtype) + or is_string_dtype(lk.dtype) + and not sort + ) + ): + lk, _ = lk._values_for_factorize() + + # error: Item "ndarray" of "Union[Any, ndarray]" has no attribute + # "_values_for_factorize" + rk, _ = rk._values_for_factorize() # type: ignore[union-attr] + + if needs_i8_conversion(lk.dtype) and 
lk.dtype == rk.dtype: + # GH#23917 TODO: Needs tests for non-matching dtypes + # GH#23917 TODO: needs tests for case where lk is integer-dtype + # and rk is datetime-dtype + lk = np.asarray(lk, dtype=np.int64) + rk = np.asarray(rk, dtype=np.int64) + + klass, lk, rk = _convert_arrays_and_get_rizer_klass(lk, rk) + + rizer = klass(max(len(lk), len(rk))) + + if isinstance(lk, BaseMaskedArray): + assert isinstance(rk, BaseMaskedArray) + llab = rizer.factorize(lk._data, mask=lk._mask) + rlab = rizer.factorize(rk._data, mask=rk._mask) + elif isinstance(lk, ArrowExtensionArray): + assert isinstance(rk, ArrowExtensionArray) + # we can only get here with numeric dtypes + # TODO: Remove when we have a Factorizer for Arrow + llab = rizer.factorize( + lk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype), mask=lk.isna() + ) + rlab = rizer.factorize( + rk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype), mask=rk.isna() + ) + else: + # Argument 1 to "factorize" of "ObjectFactorizer" has incompatible type + # "Union[ndarray[Any, dtype[signedinteger[_64Bit]]], + # ndarray[Any, dtype[object_]]]"; expected "ndarray[Any, dtype[object_]]" + llab = rizer.factorize(lk) # type: ignore[arg-type] + rlab = rizer.factorize(rk) # type: ignore[arg-type] + assert llab.dtype == np.dtype(np.intp), llab.dtype + assert rlab.dtype == np.dtype(np.intp), rlab.dtype + + count = rizer.get_count() + + if sort: + uniques = rizer.uniques.to_array() + llab, rlab = _sort_labels(uniques, llab, rlab) + + # NA group + lmask = llab == -1 + lany = lmask.any() + rmask = rlab == -1 + rany = rmask.any() + + if lany or rany: + if lany: + np.putmask(llab, lmask, count) + if rany: + np.putmask(rlab, rmask, count) + count += 1 + + return llab, rlab, count + + +def _convert_arrays_and_get_rizer_klass( + lk: ArrayLike, rk: ArrayLike +) -> tuple[type[libhashtable.Factorizer], ArrayLike, ArrayLike]: + klass: type[libhashtable.Factorizer] + if is_numeric_dtype(lk.dtype): + if lk.dtype != rk.dtype: + dtype = find_common_type([lk.dtype, rk.dtype]) + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + if not isinstance(lk, ExtensionArray): + lk = cls._from_sequence(lk, dtype=dtype, copy=False) + else: + lk = lk.astype(dtype, copy=False) + + if not isinstance(rk, ExtensionArray): + rk = cls._from_sequence(rk, dtype=dtype, copy=False) + else: + rk = rk.astype(dtype, copy=False) + else: + lk = lk.astype(dtype, copy=False) + rk = rk.astype(dtype, copy=False) + if isinstance(lk, BaseMaskedArray): + # Invalid index type "type" for "Dict[Type[object], Type[Factorizer]]"; + # expected type "Type[object]" + klass = _factorizers[lk.dtype.type] # type: ignore[index] + elif isinstance(lk.dtype, ArrowDtype): + klass = _factorizers[lk.dtype.numpy_dtype.type] + else: + klass = _factorizers[lk.dtype.type] + + else: + klass = libhashtable.ObjectFactorizer + lk = ensure_object(lk) + rk = ensure_object(rk) + return klass, lk, rk + + +def _sort_labels( + uniques: np.ndarray, left: npt.NDArray[np.intp], right: npt.NDArray[np.intp] +) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: + llength = len(left) + labels = np.concatenate([left, right]) + + _, new_labels = algos.safe_sort(uniques, labels, use_na_sentinel=True) + new_left, new_right = new_labels[:llength], new_labels[llength:] + + return new_left, new_right + + +def _get_join_keys( + llab: list[npt.NDArray[np.int64 | np.intp]], + rlab: list[npt.NDArray[np.int64 | np.intp]], + shape: Shape, + sort: bool, +) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: + # how many levels can be 
done without overflow + nlev = next( + lev + for lev in range(len(shape), 0, -1) + if not is_int64_overflow_possible(shape[:lev]) + ) + + # get keys for the first `nlev` levels + stride = np.prod(shape[1:nlev], dtype="i8") + lkey = stride * llab[0].astype("i8", subok=False, copy=False) + rkey = stride * rlab[0].astype("i8", subok=False, copy=False) + + for i in range(1, nlev): + with np.errstate(divide="ignore"): + stride //= shape[i] + lkey += llab[i] * stride + rkey += rlab[i] * stride + + if nlev == len(shape): # all done! + return lkey, rkey + + # densify current keys to avoid overflow + lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort) + + llab = [lkey] + llab[nlev:] + rlab = [rkey] + rlab[nlev:] + shape = (count,) + shape[nlev:] + + return _get_join_keys(llab, rlab, shape, sort) + + +def _should_fill(lname, rname) -> bool: + if not isinstance(lname, str) or not isinstance(rname, str): + return True + return lname == rname + + +def _any(x) -> bool: + return x is not None and com.any_not_none(*x) + + +def _validate_operand(obj: DataFrame | Series) -> DataFrame: + if isinstance(obj, ABCDataFrame): + return obj + elif isinstance(obj, ABCSeries): + if obj.name is None: + raise ValueError("Cannot merge a Series without a name") + return obj.to_frame() + else: + raise TypeError( + f"Can only merge Series or DataFrame objects, a {type(obj)} was passed" + ) + + +def _items_overlap_with_suffix( + left: Index, right: Index, suffixes: Suffixes +) -> tuple[Index, Index]: + """ + Suffixes type validation. + + If two indices overlap, add suffixes to overlapping entries. + + If corresponding suffix is empty, the entry is simply converted to string. + + """ + if not is_list_like(suffixes, allow_sets=False) or isinstance(suffixes, dict): + raise TypeError( + f"Passing 'suffixes' as a {type(suffixes)}, is not supported. " + "Provide 'suffixes' as a tuple instead." + ) + + to_rename = left.intersection(right) + if len(to_rename) == 0: + return left, right + + lsuffix, rsuffix = suffixes + + if not lsuffix and not rsuffix: + raise ValueError(f"columns overlap but no suffix specified: {to_rename}") + + def renamer(x, suffix: str | None): + """ + Rename the left and right indices. + + If there is overlap, and suffix is not None, add + suffix, otherwise, leave it as-is. 
+ + Parameters + ---------- + x : original column name + suffix : str or None + + Returns + ------- + x : renamed column name + """ + if x in to_rename and suffix is not None: + return f"{x}{suffix}" + return x + + lrenamer = partial(renamer, suffix=lsuffix) + rrenamer = partial(renamer, suffix=rsuffix) + + llabels = left._transform_index(lrenamer) + rlabels = right._transform_index(rrenamer) + + dups = [] + if not llabels.is_unique: + # Only warn when duplicates are caused because of suffixes, already duplicated + # columns in origin should not warn + dups = llabels[(llabels.duplicated()) & (~left.duplicated())].tolist() + if not rlabels.is_unique: + dups.extend(rlabels[(rlabels.duplicated()) & (~right.duplicated())].tolist()) + if dups: + raise MergeError( + f"Passing 'suffixes' which cause duplicate columns {set(dups)} is " + f"not allowed.", + ) + + return llabels, rlabels diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/pivot.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/pivot.py new file mode 100644 index 0000000000000000000000000000000000000000..b2a915589cba756c16ecaa95c3d056bd7c902c68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/reshape/pivot.py @@ -0,0 +1,899 @@ +from __future__ import annotations + +from collections.abc import ( + Hashable, + Sequence, +) +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas.util._decorators import ( + Appender, + Substitution, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import maybe_downcast_to_dtype +from pandas.core.dtypes.common import ( + is_list_like, + is_nested_list_like, + is_scalar, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +import pandas.core.common as com +from pandas.core.frame import _shared_docs +from pandas.core.groupby import Grouper +from pandas.core.indexes.api import ( + Index, + MultiIndex, + get_objs_combined_axis, +) +from pandas.core.reshape.concat import concat +from pandas.core.reshape.util import cartesian_product +from pandas.core.series import Series + +if TYPE_CHECKING: + from pandas._typing import ( + AggFuncType, + AggFuncTypeBase, + AggFuncTypeDict, + IndexLabel, + ) + + from pandas import DataFrame + + +# Note: We need to make sure `frame` is imported before `pivot`, otherwise +# _shared_docs['pivot_table'] will not yet exist. 
TODO: Fix this dependency +@Substitution("\ndata : DataFrame") +@Appender(_shared_docs["pivot_table"], indents=1) +def pivot_table( + data: DataFrame, + values=None, + index=None, + columns=None, + aggfunc: AggFuncType = "mean", + fill_value=None, + margins: bool = False, + dropna: bool = True, + margins_name: Hashable = "All", + observed: bool | lib.NoDefault = lib.no_default, + sort: bool = True, +) -> DataFrame: + index = _convert_by(index) + columns = _convert_by(columns) + + if isinstance(aggfunc, list): + pieces: list[DataFrame] = [] + keys = [] + for func in aggfunc: + _table = __internal_pivot_table( + data, + values=values, + index=index, + columns=columns, + fill_value=fill_value, + aggfunc=func, + margins=margins, + dropna=dropna, + margins_name=margins_name, + observed=observed, + sort=sort, + ) + pieces.append(_table) + keys.append(getattr(func, "__name__", func)) + + table = concat(pieces, keys=keys, axis=1) + return table.__finalize__(data, method="pivot_table") + + table = __internal_pivot_table( + data, + values, + index, + columns, + aggfunc, + fill_value, + margins, + dropna, + margins_name, + observed, + sort, + ) + return table.__finalize__(data, method="pivot_table") + + +def __internal_pivot_table( + data: DataFrame, + values, + index, + columns, + aggfunc: AggFuncTypeBase | AggFuncTypeDict, + fill_value, + margins: bool, + dropna: bool, + margins_name: Hashable, + observed: bool | lib.NoDefault, + sort: bool, +) -> DataFrame: + """ + Helper of :func:`pandas.pivot_table` for any non-list ``aggfunc``. + """ + keys = index + columns + + values_passed = values is not None + if values_passed: + if is_list_like(values): + values_multi = True + values = list(values) + else: + values_multi = False + values = [values] + + # GH14938 Make sure value labels are in data + for i in values: + if i not in data: + raise KeyError(i) + + to_filter = [] + for x in keys + values: + if isinstance(x, Grouper): + x = x.key + try: + if x in data: + to_filter.append(x) + except TypeError: + pass + if len(to_filter) < len(data.columns): + data = data[to_filter] + + else: + values = data.columns + for key in keys: + try: + values = values.drop(key) + except (TypeError, ValueError, KeyError): + pass + values = list(values) + + observed_bool = False if observed is lib.no_default else observed + grouped = data.groupby(keys, observed=observed_bool, sort=sort, dropna=dropna) + if observed is lib.no_default and any( + ping._passed_categorical for ping in grouped._grouper.groupings + ): + warnings.warn( + "The default value of observed=False is deprecated and will change " + "to observed=True in a future version of pandas. Specify " + "observed=False to silence this warning and retain the current behavior", + category=FutureWarning, + stacklevel=find_stack_level(), + ) + agged = grouped.agg(aggfunc) + + if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns): + agged = agged.dropna(how="all") + + table = agged + + # GH17038, this check should only happen if index is defined (not None) + if table.index.nlevels > 1 and index: + # Related GH #17123 + # If index_names are integers, determine whether the integers refer + # to the level position or name. 
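For orientation, here is a minimal, illustrative sketch of the public pivot_table call that this internal helper ultimately serves; the small frame and the column names ("city", "year", "sales") are invented for the example and are not part of the pandas source in this diff.

    import pandas as pd

    df = pd.DataFrame(
        {
            "city": ["NY", "NY", "SF", "SF"],
            "year": [2020, 2021, 2020, 2021],
            "sales": [1, 2, 3, 4],
        }
    )
    # Group by the row key ("city") and the column key ("year"), then aggregate
    # "sales" with the requested aggfunc; combinations that never occur become
    # NaN unless fill_value is supplied.
    table = pd.pivot_table(
        df, values="sales", index="city", columns="year", aggfunc="sum"
    )
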
+ index_names = agged.index.names[: len(index)] + to_unstack = [] + for i in range(len(index), len(keys)): + name = agged.index.names[i] + if name is None or name in index_names: + to_unstack.append(i) + else: + to_unstack.append(name) + table = agged.unstack(to_unstack, fill_value=fill_value) + + if not dropna: + if isinstance(table.index, MultiIndex): + m = MultiIndex.from_arrays( + cartesian_product(table.index.levels), names=table.index.names + ) + table = table.reindex(m, axis=0, fill_value=fill_value) + + if isinstance(table.columns, MultiIndex): + m = MultiIndex.from_arrays( + cartesian_product(table.columns.levels), names=table.columns.names + ) + table = table.reindex(m, axis=1, fill_value=fill_value) + + if sort is True and isinstance(table, ABCDataFrame): + table = table.sort_index(axis=1) + + if fill_value is not None: + table = table.fillna(fill_value) + if aggfunc is len and not observed and lib.is_integer(fill_value): + # TODO: can we avoid this? this used to be handled by + # downcast="infer" in fillna + table = table.astype(np.int64) + + if margins: + if dropna: + data = data[data.notna().all(axis=1)] + table = _add_margins( + table, + data, + values, + rows=index, + cols=columns, + aggfunc=aggfunc, + observed=dropna, + margins_name=margins_name, + fill_value=fill_value, + ) + + # discard the top level + if values_passed and not values_multi and table.columns.nlevels > 1: + table.columns = table.columns.droplevel(0) + if len(index) == 0 and len(columns) > 0: + table = table.T + + # GH 15193 Make sure empty columns are removed if dropna=True + if isinstance(table, ABCDataFrame) and dropna: + table = table.dropna(how="all", axis=1) + + return table + + +def _add_margins( + table: DataFrame | Series, + data: DataFrame, + values, + rows, + cols, + aggfunc, + observed: bool, + margins_name: Hashable = "All", + fill_value=None, +): + if not isinstance(margins_name, str): + raise ValueError("margins_name argument must be a string") + + msg = f'Conflicting name "{margins_name}" in margins' + for level in table.index.names: + if margins_name in table.index.get_level_values(level): + raise ValueError(msg) + + grand_margin = _compute_grand_margin(data, values, aggfunc, margins_name) + + if table.ndim == 2: + # i.e. DataFrame + for level in table.columns.names[1:]: + if margins_name in table.columns.get_level_values(level): + raise ValueError(msg) + + key: str | tuple[str, ...] + if len(rows) > 1: + key = (margins_name,) + ("",) * (len(rows) - 1) + else: + key = margins_name + + if not values and isinstance(table, ABCSeries): + # If there are no values and the table is a series, then there is only + # one column in the data. Compute grand margin and return it. 
+ return table._append(table._constructor({key: grand_margin[margins_name]})) + + elif values: + marginal_result_set = _generate_marginal_results( + table, data, values, rows, cols, aggfunc, observed, margins_name + ) + if not isinstance(marginal_result_set, tuple): + return marginal_result_set + result, margin_keys, row_margin = marginal_result_set + else: + # no values, and table is a DataFrame + assert isinstance(table, ABCDataFrame) + marginal_result_set = _generate_marginal_results_without_values( + table, data, rows, cols, aggfunc, observed, margins_name + ) + if not isinstance(marginal_result_set, tuple): + return marginal_result_set + result, margin_keys, row_margin = marginal_result_set + + row_margin = row_margin.reindex(result.columns, fill_value=fill_value) + # populate grand margin + for k in margin_keys: + if isinstance(k, str): + row_margin[k] = grand_margin[k] + else: + row_margin[k] = grand_margin[k[0]] + + from pandas import DataFrame + + margin_dummy = DataFrame(row_margin, columns=Index([key])).T + + row_names = result.index.names + # check the result column and leave floats + + for dtype in set(result.dtypes): + if isinstance(dtype, ExtensionDtype): + # Can hold NA already + continue + + cols = result.select_dtypes([dtype]).columns + margin_dummy[cols] = margin_dummy[cols].apply( + maybe_downcast_to_dtype, args=(dtype,) + ) + result = result._append(margin_dummy) + result.index.names = row_names + + return result + + +def _compute_grand_margin( + data: DataFrame, values, aggfunc, margins_name: Hashable = "All" +): + if values: + grand_margin = {} + for k, v in data[values].items(): + try: + if isinstance(aggfunc, str): + grand_margin[k] = getattr(v, aggfunc)() + elif isinstance(aggfunc, dict): + if isinstance(aggfunc[k], str): + grand_margin[k] = getattr(v, aggfunc[k])() + else: + grand_margin[k] = aggfunc[k](v) + else: + grand_margin[k] = aggfunc(v) + except TypeError: + pass + return grand_margin + else: + return {margins_name: aggfunc(data.index)} + + +def _generate_marginal_results( + table, + data: DataFrame, + values, + rows, + cols, + aggfunc, + observed: bool, + margins_name: Hashable = "All", +): + margin_keys: list | Index + if len(cols) > 0: + # need to "interleave" the margins + table_pieces = [] + margin_keys = [] + + def _all_key(key): + return (key, margins_name) + ("",) * (len(cols) - 1) + + if len(rows) > 0: + margin = data[rows + values].groupby(rows, observed=observed).agg(aggfunc) + cat_axis = 1 + + for key, piece in table.T.groupby(level=0, observed=observed): + piece = piece.T + all_key = _all_key(key) + + # we are going to mutate this, so need to copy! 
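As a companion sketch for the margin helpers defined in this region, the fragment below exercises margins and normalization through the public crosstab interface; the input arrays are made up purely for illustration and are not taken from this diff.

    import numpy as np
    import pandas as pd

    a = np.array(["x", "x", "y", "y"])
    b = np.array(["p", "q", "p", "p"])
    # margins=True appends an "All" row and column (built by the margin helpers
    # above); normalize="all" then divides every cell, margins included, by the
    # grand total in the normalization step defined further below.
    ct = pd.crosstab(a, b, margins=True, normalize="all")
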
+ piece = piece.copy() + piece[all_key] = margin[key] + + table_pieces.append(piece) + margin_keys.append(all_key) + else: + from pandas import DataFrame + + cat_axis = 0 + for key, piece in table.groupby(level=0, observed=observed): + if len(cols) > 1: + all_key = _all_key(key) + else: + all_key = margins_name + table_pieces.append(piece) + # GH31016 this is to calculate margin for each group, and assign + # corresponded key as index + transformed_piece = DataFrame(piece.apply(aggfunc)).T + if isinstance(piece.index, MultiIndex): + # We are adding an empty level + transformed_piece.index = MultiIndex.from_tuples( + [all_key], names=piece.index.names + [None] + ) + else: + transformed_piece.index = Index([all_key], name=piece.index.name) + + # append piece for margin into table_piece + table_pieces.append(transformed_piece) + margin_keys.append(all_key) + + if not table_pieces: + # GH 49240 + return table + else: + result = concat(table_pieces, axis=cat_axis) + + if len(rows) == 0: + return result + else: + result = table + margin_keys = table.columns + + if len(cols) > 0: + row_margin = data[cols + values].groupby(cols, observed=observed).agg(aggfunc) + row_margin = row_margin.stack(future_stack=True) + + # GH#26568. Use names instead of indices in case of numeric names + new_order_indices = [len(cols)] + list(range(len(cols))) + new_order_names = [row_margin.index.names[i] for i in new_order_indices] + row_margin.index = row_margin.index.reorder_levels(new_order_names) + else: + row_margin = data._constructor_sliced(np.nan, index=result.columns) + + return result, margin_keys, row_margin + + +def _generate_marginal_results_without_values( + table: DataFrame, + data: DataFrame, + rows, + cols, + aggfunc, + observed: bool, + margins_name: Hashable = "All", +): + margin_keys: list | Index + if len(cols) > 0: + # need to "interleave" the margins + margin_keys = [] + + def _all_key(): + if len(cols) == 1: + return margins_name + return (margins_name,) + ("",) * (len(cols) - 1) + + if len(rows) > 0: + margin = data.groupby(rows, observed=observed)[rows].apply(aggfunc) + all_key = _all_key() + table[all_key] = margin + result = table + margin_keys.append(all_key) + + else: + margin = data.groupby(level=0, axis=0, observed=observed).apply(aggfunc) + all_key = _all_key() + table[all_key] = margin + result = table + margin_keys.append(all_key) + return result + else: + result = table + margin_keys = table.columns + + if len(cols): + row_margin = data.groupby(cols, observed=observed)[cols].apply(aggfunc) + else: + row_margin = Series(np.nan, index=result.columns) + + return result, margin_keys, row_margin + + +def _convert_by(by): + if by is None: + by = [] + elif ( + is_scalar(by) + or isinstance(by, (np.ndarray, Index, ABCSeries, Grouper)) + or callable(by) + ): + by = [by] + else: + by = list(by) + return by + + +@Substitution("\ndata : DataFrame") +@Appender(_shared_docs["pivot"], indents=1) +def pivot( + data: DataFrame, + *, + columns: IndexLabel, + index: IndexLabel | lib.NoDefault = lib.no_default, + values: IndexLabel | lib.NoDefault = lib.no_default, +) -> DataFrame: + columns_listlike = com.convert_to_list_like(columns) + + # If columns is None we will create a MultiIndex level with None as name + # which might cause duplicated names because None is the default for + # level names + data = data.copy(deep=False) + data.index = data.index.copy() + data.index.names = [ + name if name is not None else lib.no_default for name in data.index.names + ] + + indexed: DataFrame | Series + if values 
is lib.no_default: + if index is not lib.no_default: + cols = com.convert_to_list_like(index) + else: + cols = [] + + append = index is lib.no_default + # error: Unsupported operand types for + ("List[Any]" and "ExtensionArray") + # error: Unsupported left operand type for + ("ExtensionArray") + indexed = data.set_index( + cols + columns_listlike, append=append # type: ignore[operator] + ) + else: + index_list: list[Index] | list[Series] + if index is lib.no_default: + if isinstance(data.index, MultiIndex): + # GH 23955 + index_list = [ + data.index.get_level_values(i) for i in range(data.index.nlevels) + ] + else: + index_list = [ + data._constructor_sliced(data.index, name=data.index.name) + ] + else: + index_list = [data[idx] for idx in com.convert_to_list_like(index)] + + data_columns = [data[col] for col in columns_listlike] + index_list.extend(data_columns) + multiindex = MultiIndex.from_arrays(index_list) + + if is_list_like(values) and not isinstance(values, tuple): + # Exclude tuple because it is seen as a single column name + values = cast(Sequence[Hashable], values) + indexed = data._constructor( + data[values]._values, index=multiindex, columns=values + ) + else: + indexed = data._constructor_sliced(data[values]._values, index=multiindex) + # error: Argument 1 to "unstack" of "DataFrame" has incompatible type "Union + # [List[Any], ExtensionArray, ndarray[Any, Any], Index, Series]"; expected + # "Hashable" + result = indexed.unstack(columns_listlike) # type: ignore[arg-type] + result.index.names = [ + name if name is not lib.no_default else None for name in result.index.names + ] + + return result + + +def crosstab( + index, + columns, + values=None, + rownames=None, + colnames=None, + aggfunc=None, + margins: bool = False, + margins_name: Hashable = "All", + dropna: bool = True, + normalize: bool | Literal[0, 1, "all", "index", "columns"] = False, +) -> DataFrame: + """ + Compute a simple cross tabulation of two (or more) factors. + + By default, computes a frequency table of the factors unless an + array of values and an aggregation function are passed. + + Parameters + ---------- + index : array-like, Series, or list of arrays/Series + Values to group by in the rows. + columns : array-like, Series, or list of arrays/Series + Values to group by in the columns. + values : array-like, optional + Array of values to aggregate according to the factors. + Requires `aggfunc` be specified. + rownames : sequence, default None + If passed, must match number of row arrays passed. + colnames : sequence, default None + If passed, must match number of column arrays passed. + aggfunc : function, optional + If specified, requires `values` be specified as well. + margins : bool, default False + Add row/column margins (subtotals). + margins_name : str, default 'All' + Name of the row/column that will contain the totals + when margins is True. + dropna : bool, default True + Do not include columns whose entries are all NaN. + normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False + Normalize by dividing all values by the sum of values. + + - If passed 'all' or `True`, will normalize over all values. + - If passed 'index' will normalize over each row. + - If passed 'columns' will normalize over each column. + - If margins is `True`, will also normalize margin values. + + Returns + ------- + DataFrame + Cross tabulation of the data. + + See Also + -------- + DataFrame.pivot : Reshape data based on column values. + pivot_table : Create a pivot table as a DataFrame. 
+ + Notes + ----- + Any Series passed will have their name attributes used unless row or column + names for the cross-tabulation are specified. + + Any input passed containing Categorical data will have **all** of its + categories included in the cross-tabulation, even if the actual data does + not contain any instances of a particular category. + + In the event that there aren't overlapping indexes an empty DataFrame will + be returned. + + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + >>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar", + ... "bar", "bar", "foo", "foo", "foo"], dtype=object) + >>> b = np.array(["one", "one", "one", "two", "one", "one", + ... "one", "two", "two", "two", "one"], dtype=object) + >>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny", + ... "shiny", "dull", "shiny", "shiny", "shiny"], + ... dtype=object) + >>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c']) + b one two + c dull shiny dull shiny + a + bar 1 2 1 0 + foo 2 2 1 2 + + Here 'c' and 'f' are not represented in the data and will not be + shown in the output because dropna is True by default. Set + dropna=False to preserve categories with no data. + + >>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c']) + >>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f']) + >>> pd.crosstab(foo, bar) + col_0 d e + row_0 + a 1 0 + b 0 1 + >>> pd.crosstab(foo, bar, dropna=False) + col_0 d e f + row_0 + a 1 0 0 + b 0 1 0 + c 0 0 0 + """ + if values is None and aggfunc is not None: + raise ValueError("aggfunc cannot be used without values.") + + if values is not None and aggfunc is None: + raise ValueError("values cannot be used without an aggfunc.") + + if not is_nested_list_like(index): + index = [index] + if not is_nested_list_like(columns): + columns = [columns] + + common_idx = None + pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))] + if pass_objs: + common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False) + + rownames = _get_names(index, rownames, prefix="row") + colnames = _get_names(columns, colnames, prefix="col") + + # duplicate names mapped to unique names for pivot op + ( + rownames_mapper, + unique_rownames, + colnames_mapper, + unique_colnames, + ) = _build_names_mapper(rownames, colnames) + + from pandas import DataFrame + + data = { + **dict(zip(unique_rownames, index)), + **dict(zip(unique_colnames, columns)), + } + df = DataFrame(data, index=common_idx) + + if values is None: + df["__dummy__"] = 0 + kwargs = {"aggfunc": len, "fill_value": 0} + else: + df["__dummy__"] = values + kwargs = {"aggfunc": aggfunc} + + # error: Argument 7 to "pivot_table" of "DataFrame" has incompatible type + # "**Dict[str, object]"; expected "Union[...]" + table = df.pivot_table( + "__dummy__", + index=unique_rownames, + columns=unique_colnames, + margins=margins, + margins_name=margins_name, + dropna=dropna, + observed=False, + **kwargs, # type: ignore[arg-type] + ) + + # Post-process + if normalize is not False: + table = _normalize( + table, normalize=normalize, margins=margins, margins_name=margins_name + ) + + table = table.rename_axis(index=rownames_mapper, axis=0) + table = table.rename_axis(columns=colnames_mapper, axis=1) + + return table + + +def _normalize( + table: DataFrame, normalize, margins: bool, margins_name: Hashable = "All" +) -> DataFrame: + if not isinstance(normalize, (bool, str)): + axis_subs = {0: "index", 1: "columns"} + try: + normalize = axis_subs[normalize] + 
except KeyError as err: + raise ValueError("Not a valid normalize argument") from err + + if margins is False: + # Actual Normalizations + normalizers: dict[bool | str, Callable] = { + "all": lambda x: x / x.sum(axis=1).sum(axis=0), + "columns": lambda x: x / x.sum(), + "index": lambda x: x.div(x.sum(axis=1), axis=0), + } + + normalizers[True] = normalizers["all"] + + try: + f = normalizers[normalize] + except KeyError as err: + raise ValueError("Not a valid normalize argument") from err + + table = f(table) + table = table.fillna(0) + + elif margins is True: + # keep index and column of pivoted table + table_index = table.index + table_columns = table.columns + last_ind_or_col = table.iloc[-1, :].name + + # check if margin name is not in (for MI cases) and not equal to last + # index/column and save the column and index margin + if (margins_name not in last_ind_or_col) & (margins_name != last_ind_or_col): + raise ValueError(f"{margins_name} not in pivoted DataFrame") + column_margin = table.iloc[:-1, -1] + index_margin = table.iloc[-1, :-1] + + # keep the core table + table = table.iloc[:-1, :-1] + + # Normalize core + table = _normalize(table, normalize=normalize, margins=False) + + # Fix Margins + if normalize == "columns": + column_margin = column_margin / column_margin.sum() + table = concat([table, column_margin], axis=1) + table = table.fillna(0) + table.columns = table_columns + + elif normalize == "index": + index_margin = index_margin / index_margin.sum() + table = table._append(index_margin) + table = table.fillna(0) + table.index = table_index + + elif normalize == "all" or normalize is True: + column_margin = column_margin / column_margin.sum() + index_margin = index_margin / index_margin.sum() + index_margin.loc[margins_name] = 1 + table = concat([table, column_margin], axis=1) + table = table._append(index_margin) + + table = table.fillna(0) + table.index = table_index + table.columns = table_columns + + else: + raise ValueError("Not a valid normalize argument") + + else: + raise ValueError("Not a valid margins argument") + + return table + + +def _get_names(arrs, names, prefix: str = "row"): + if names is None: + names = [] + for i, arr in enumerate(arrs): + if isinstance(arr, ABCSeries) and arr.name is not None: + names.append(arr.name) + else: + names.append(f"{prefix}_{i}") + else: + if len(names) != len(arrs): + raise AssertionError("arrays and names must have the same length") + if not isinstance(names, list): + names = list(names) + + return names + + +def _build_names_mapper( + rownames: list[str], colnames: list[str] +) -> tuple[dict[str, str], list[str], dict[str, str], list[str]]: + """ + Given the names of a DataFrame's rows and columns, returns a set of unique row + and column names and mappers that convert to original names. + + A row or column name is replaced if it is duplicate among the rows of the inputs, + among the columns of the inputs or between the rows and the columns. 
+ + Parameters + ---------- + rownames: list[str] + colnames: list[str] + + Returns + ------- + Tuple(Dict[str, str], List[str], Dict[str, str], List[str]) + + rownames_mapper: dict[str, str] + a dictionary with new row names as keys and original rownames as values + unique_rownames: list[str] + a list of rownames with duplicate names replaced by dummy names + colnames_mapper: dict[str, str] + a dictionary with new column names as keys and original column names as values + unique_colnames: list[str] + a list of column names with duplicate names replaced by dummy names + + """ + + def get_duplicates(names): + seen: set = set() + return {name for name in names if name not in seen} + + shared_names = set(rownames).intersection(set(colnames)) + dup_names = get_duplicates(rownames) | get_duplicates(colnames) | shared_names + + rownames_mapper = { + f"row_{i}": name for i, name in enumerate(rownames) if name in dup_names + } + unique_rownames = [ + f"row_{i}" if name in dup_names else name for i, name in enumerate(rownames) + ] + + colnames_mapper = { + f"col_{i}": name for i, name in enumerate(colnames) if name in dup_names + } + unique_colnames = [ + f"col_{i}" if name in dup_names else name for i, name in enumerate(colnames) + ] + + return rownames_mapper, unique_rownames, colnames_mapper, unique_colnames diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/reshape.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/reshape.py new file mode 100644 index 0000000000000000000000000000000000000000..7a49682d7c57c90ed26e890777758ad806bd961b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/reshape/reshape.py @@ -0,0 +1,989 @@ +from __future__ import annotations + +import itertools +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import numpy as np + +import pandas._libs.reshape as libreshape +from pandas.errors import PerformanceWarning +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + find_common_type, + maybe_promote, +) +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_1d_only_ea_dtype, + is_integer, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.missing import notna + +import pandas.core.algorithms as algos +from pandas.core.algorithms import ( + factorize, + unique, +) +from pandas.core.arrays.categorical import factorize_from_iterable +from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.frame import DataFrame +from pandas.core.indexes.api import ( + Index, + MultiIndex, + RangeIndex, +) +from pandas.core.reshape.concat import concat +from pandas.core.series import Series +from pandas.core.sorting import ( + compress_group_index, + decons_obs_group_ids, + get_compressed_ids, + get_group_index, + get_group_index_sorter, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Level, + npt, + ) + + from pandas.core.arrays import ExtensionArray + from pandas.core.indexes.frozen import FrozenList + + +class _Unstacker: + """ + Helper class to unstack data / pivot with multi-level index + + Parameters + ---------- + index : MultiIndex + level : int or str, default last level + Level to "unstack". Accepts a name for the level. + fill_value : scalar, optional + Default value to fill in missing values if subgroups do not have the + same set of labels. 
By default, missing values will be replaced with
+        the default fill value for that data type, NaN for float, NaT for
+        datetimelike, etc. For integer types, by default data will be converted
+        to float and missing values will be set to NaN.
+    constructor : object
+        Pandas ``DataFrame`` or subclass used to create unstacked
+        response. If None, DataFrame will be used.
+
+    Examples
+    --------
+    >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
+    ...                                    ('two', 'a'), ('two', 'b')])
+    >>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
+    >>> s
+    one  a    1
+         b    2
+    two  a    3
+         b    4
+    dtype: int64
+
+    >>> s.unstack(level=-1)
+         a  b
+    one  1  2
+    two  3  4
+
+    >>> s.unstack(level=0)
+       one  two
+    a    1    3
+    b    2    4
+
+    Returns
+    -------
+    unstacked : DataFrame
+    """
+
+    def __init__(
+        self, index: MultiIndex, level: Level, constructor, sort: bool = True
+    ) -> None:
+        self.constructor = constructor
+        self.sort = sort
+
+        self.index = index.remove_unused_levels()
+
+        self.level = self.index._get_level_number(level)
+
+        # when index includes `nan`, need to lift levels/strides by 1
+        self.lift = 1 if -1 in self.index.codes[self.level] else 0
+
+        # Note: the "pop" below alters these in-place.
+        self.new_index_levels = list(self.index.levels)
+        self.new_index_names = list(self.index.names)
+
+        self.removed_name = self.new_index_names.pop(self.level)
+        self.removed_level = self.new_index_levels.pop(self.level)
+        self.removed_level_full = index.levels[self.level]
+        if not self.sort:
+            unique_codes = unique(self.index.codes[self.level])
+            self.removed_level = self.removed_level.take(unique_codes)
+            self.removed_level_full = self.removed_level_full.take(unique_codes)
+
+        # Bug fix GH 20601
+        # If the data frame is too big, the number of unique index combinations
+        # will cause int32 overflow on Windows environments.
+        # We want to check and raise a warning before this happens
+        num_rows = np.max([index_level.size for index_level in self.new_index_levels])
+        num_columns = self.removed_level.size
+
+        # GH20601: This forces an overflow if the number of cells is too high.
+        num_cells = num_rows * num_columns
+
+        # GH 26314: Previous ValueError raised was too restrictive for many users.
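To make the cell-count check that follows concrete (an editor-added sketch, not part of the pandas source): the prospective result has one row per combination of the remaining index levels and one column per removed-level label, so the product can overflow the int32 range quickly.

>>> num_rows, num_columns = 100_000, 50_000
>>> num_rows * num_columns > np.iinfo(np.int32).max
True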
+ if num_cells > np.iinfo(np.int32).max: + warnings.warn( + f"The following operation may generate {num_cells} cells " + f"in the resulting pandas object.", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + + self._make_selectors() + + @cache_readonly + def _indexer_and_to_sort( + self, + ) -> tuple[ + npt.NDArray[np.intp], + list[np.ndarray], # each has _some_ signed integer dtype + ]: + v = self.level + + codes = list(self.index.codes) + levs = list(self.index.levels) + to_sort = codes[:v] + codes[v + 1 :] + [codes[v]] + sizes = tuple(len(x) for x in levs[:v] + levs[v + 1 :] + [levs[v]]) + + comp_index, obs_ids = get_compressed_ids(to_sort, sizes) + ngroups = len(obs_ids) + + indexer = get_group_index_sorter(comp_index, ngroups) + return indexer, to_sort + + @cache_readonly + def sorted_labels(self) -> list[np.ndarray]: + indexer, to_sort = self._indexer_and_to_sort + if self.sort: + return [line.take(indexer) for line in to_sort] + return to_sort + + def _make_sorted_values(self, values: np.ndarray) -> np.ndarray: + if self.sort: + indexer, _ = self._indexer_and_to_sort + + sorted_values = algos.take_nd(values, indexer, axis=0) + return sorted_values + return values + + def _make_selectors(self): + new_levels = self.new_index_levels + + # make the mask + remaining_labels = self.sorted_labels[:-1] + level_sizes = tuple(len(x) for x in new_levels) + + comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes) + ngroups = len(obs_ids) + + comp_index = ensure_platform_int(comp_index) + stride = self.index.levshape[self.level] + self.lift + self.full_shape = ngroups, stride + + selector = self.sorted_labels[-1] + stride * comp_index + self.lift + mask = np.zeros(np.prod(self.full_shape), dtype=bool) + mask.put(selector, True) + + if mask.sum() < len(self.index): + raise ValueError("Index contains duplicate entries, cannot reshape") + + self.group_index = comp_index + self.mask = mask + if self.sort: + self.compressor = comp_index.searchsorted(np.arange(ngroups)) + else: + self.compressor = np.sort(np.unique(comp_index, return_index=True)[1]) + + @cache_readonly + def mask_all(self) -> bool: + return bool(self.mask.all()) + + @cache_readonly + def arange_result(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.bool_]]: + # We cache this for reuse in ExtensionBlock._unstack + dummy_arr = np.arange(len(self.index), dtype=np.intp) + new_values, mask = self.get_new_values(dummy_arr, fill_value=-1) + return new_values, mask.any(0) + # TODO: in all tests we have mask.any(0).all(); can we rely on that? 
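The selector and mask bookkeeping above is also what detects non-unique index combinations. A hedged example of the resulting error (editor's illustration):

>>> s = pd.Series([1, 2, 3], index=pd.MultiIndex.from_tuples(
...     [("a", "x"), ("a", "x"), ("b", "y")]))
>>> s.unstack()  # doctest: +SKIP
ValueError: Index contains duplicate entries, cannot reshape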
+ + def get_result(self, values, value_columns, fill_value) -> DataFrame: + if values.ndim == 1: + values = values[:, np.newaxis] + + if value_columns is None and values.shape[1] != 1: # pragma: no cover + raise ValueError("must pass column labels for multi-column data") + + values, _ = self.get_new_values(values, fill_value) + columns = self.get_new_columns(value_columns) + index = self.new_index + + return self.constructor( + values, index=index, columns=columns, dtype=values.dtype + ) + + def get_new_values(self, values, fill_value=None): + if values.ndim == 1: + values = values[:, np.newaxis] + + sorted_values = self._make_sorted_values(values) + + # place the values + length, width = self.full_shape + stride = values.shape[1] + result_width = width * stride + result_shape = (length, result_width) + mask = self.mask + mask_all = self.mask_all + + # we can simply reshape if we don't have a mask + if mask_all and len(values): + # TODO: Under what circumstances can we rely on sorted_values + # matching values? When that holds, we can slice instead + # of take (in particular for EAs) + new_values = ( + sorted_values.reshape(length, width, stride) + .swapaxes(1, 2) + .reshape(result_shape) + ) + new_mask = np.ones(result_shape, dtype=bool) + return new_values, new_mask + + dtype = values.dtype + + # if our mask is all True, then we can use our existing dtype + if mask_all: + dtype = values.dtype + new_values = np.empty(result_shape, dtype=dtype) + else: + if isinstance(dtype, ExtensionDtype): + # GH#41875 + # We are assuming that fill_value can be held by this dtype, + # unlike the non-EA case that promotes. + cls = dtype.construct_array_type() + new_values = cls._empty(result_shape, dtype=dtype) + new_values[:] = fill_value + else: + dtype, fill_value = maybe_promote(dtype, fill_value) + new_values = np.empty(result_shape, dtype=dtype) + new_values.fill(fill_value) + + name = dtype.name + new_mask = np.zeros(result_shape, dtype=bool) + + # we need to convert to a basic dtype + # and possibly coerce an input to our output dtype + # e.g. 
ints -> floats + if needs_i8_conversion(values.dtype): + sorted_values = sorted_values.view("i8") + new_values = new_values.view("i8") + else: + sorted_values = sorted_values.astype(name, copy=False) + + # fill in our values & mask + libreshape.unstack( + sorted_values, + mask.view("u1"), + stride, + length, + width, + new_values, + new_mask.view("u1"), + ) + + # reconstruct dtype if needed + if needs_i8_conversion(values.dtype): + # view as datetime64 so we can wrap in DatetimeArray and use + # DTA's view method + new_values = new_values.view("M8[ns]") + new_values = ensure_wrapped_if_datetimelike(new_values) + new_values = new_values.view(values.dtype) + + return new_values, new_mask + + def get_new_columns(self, value_columns: Index | None): + if value_columns is None: + if self.lift == 0: + return self.removed_level._rename(name=self.removed_name) + + lev = self.removed_level.insert(0, item=self.removed_level._na_value) + return lev.rename(self.removed_name) + + stride = len(self.removed_level) + self.lift + width = len(value_columns) + propagator = np.repeat(np.arange(width), stride) + + new_levels: FrozenList | list[Index] + + if isinstance(value_columns, MultiIndex): + # error: Cannot determine type of "__add__" [has-type] + new_levels = value_columns.levels + ( # type: ignore[has-type] + self.removed_level_full, + ) + new_names = value_columns.names + (self.removed_name,) + + new_codes = [lab.take(propagator) for lab in value_columns.codes] + else: + new_levels = [ + value_columns, + self.removed_level_full, + ] + new_names = [value_columns.name, self.removed_name] + new_codes = [propagator] + + repeater = self._repeater + + # The entire level is then just a repetition of the single chunk: + new_codes.append(np.tile(repeater, width)) + return MultiIndex( + levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False + ) + + @cache_readonly + def _repeater(self) -> np.ndarray: + # The two indices differ only if the unstacked level had unused items: + if len(self.removed_level_full) != len(self.removed_level): + # In this case, we remap the new codes to the original level: + repeater = self.removed_level_full.get_indexer(self.removed_level) + if self.lift: + repeater = np.insert(repeater, 0, -1) + else: + # Otherwise, we just use each level item exactly once: + stride = len(self.removed_level) + self.lift + repeater = np.arange(stride) - self.lift + + return repeater + + @cache_readonly + def new_index(self) -> MultiIndex: + # Does not depend on values or value_columns + result_codes = [lab.take(self.compressor) for lab in self.sorted_labels[:-1]] + + # construct the new index + if len(self.new_index_levels) == 1: + level, level_codes = self.new_index_levels[0], result_codes[0] + if (level_codes == -1).any(): + level = level.insert(len(level), level._na_value) + return level.take(level_codes).rename(self.new_index_names[0]) + + return MultiIndex( + levels=self.new_index_levels, + codes=result_codes, + names=self.new_index_names, + verify_integrity=False, + ) + + +def _unstack_multiple( + data: Series | DataFrame, clocs, fill_value=None, sort: bool = True +): + if len(clocs) == 0: + return data + + # NOTE: This doesn't deal with hierarchical columns yet + + index = data.index + index = cast(MultiIndex, index) # caller is responsible for checking + + # GH 19966 Make sure if MultiIndexed index has tuple name, they will be + # recognised as a whole + if clocs in index.names: + clocs = [clocs] + clocs = [index._get_level_number(i) for i in clocs] + + rlocs = [i for i in 
range(index.nlevels) if i not in clocs] + + clevels = [index.levels[i] for i in clocs] + ccodes = [index.codes[i] for i in clocs] + cnames = [index.names[i] for i in clocs] + rlevels = [index.levels[i] for i in rlocs] + rcodes = [index.codes[i] for i in rlocs] + rnames = [index.names[i] for i in rlocs] + + shape = tuple(len(x) for x in clevels) + group_index = get_group_index(ccodes, shape, sort=False, xnull=False) + + comp_ids, obs_ids = compress_group_index(group_index, sort=False) + recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False) + + if not rlocs: + # Everything is in clocs, so the dummy df has a regular index + dummy_index = Index(obs_ids, name="__placeholder__") + else: + dummy_index = MultiIndex( + levels=rlevels + [obs_ids], + codes=rcodes + [comp_ids], + names=rnames + ["__placeholder__"], + verify_integrity=False, + ) + + if isinstance(data, Series): + dummy = data.copy() + dummy.index = dummy_index + + unstacked = dummy.unstack("__placeholder__", fill_value=fill_value, sort=sort) + new_levels = clevels + new_names = cnames + new_codes = recons_codes + else: + if isinstance(data.columns, MultiIndex): + result = data + while clocs: + val = clocs.pop(0) + result = result.unstack(val, fill_value=fill_value, sort=sort) + clocs = [v if v < val else v - 1 for v in clocs] + + return result + + # GH#42579 deep=False to avoid consolidating + dummy_df = data.copy(deep=False) + dummy_df.index = dummy_index + + unstacked = dummy_df.unstack( + "__placeholder__", fill_value=fill_value, sort=sort + ) + if isinstance(unstacked, Series): + unstcols = unstacked.index + else: + unstcols = unstacked.columns + assert isinstance(unstcols, MultiIndex) # for mypy + new_levels = [unstcols.levels[0]] + clevels + new_names = [data.columns.name] + cnames + + new_codes = [unstcols.codes[0]] + new_codes.extend(rec.take(unstcols.codes[-1]) for rec in recons_codes) + + new_columns = MultiIndex( + levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False + ) + + if isinstance(unstacked, Series): + unstacked.index = new_columns + else: + unstacked.columns = new_columns + + return unstacked + + +def unstack(obj: Series | DataFrame, level, fill_value=None, sort: bool = True): + if isinstance(level, (tuple, list)): + if len(level) != 1: + # _unstack_multiple only handles MultiIndexes, + # and isn't needed for a single level + return _unstack_multiple(obj, level, fill_value=fill_value, sort=sort) + else: + level = level[0] + + if not is_integer(level) and not level == "__placeholder__": + # check if level is valid in case of regular index + obj.index._get_level_number(level) + + if isinstance(obj, DataFrame): + if isinstance(obj.index, MultiIndex): + return _unstack_frame(obj, level, fill_value=fill_value, sort=sort) + else: + return obj.T.stack(future_stack=True) + elif not isinstance(obj.index, MultiIndex): + # GH 36113 + # Give nicer error messages when unstack a Series whose + # Index is not a MultiIndex. 
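For the multi-level path handled by _unstack_multiple above, a small editor-added example (shape only, to stay version neutral):

>>> idx = pd.MultiIndex.from_product([["a", "b"], [1, 2], ["x", "y"]],
...                                  names=["l0", "l1", "l2"])
>>> s = pd.Series(range(8), index=idx)
>>> s.unstack(["l1", "l2"]).shape  # both levels move into the columns
(2, 4)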
+ raise ValueError( + f"index must be a MultiIndex to unstack, {type(obj.index)} was passed" + ) + else: + if is_1d_only_ea_dtype(obj.dtype): + return _unstack_extension_series(obj, level, fill_value, sort=sort) + unstacker = _Unstacker( + obj.index, level=level, constructor=obj._constructor_expanddim, sort=sort + ) + return unstacker.get_result( + obj._values, value_columns=None, fill_value=fill_value + ) + + +def _unstack_frame( + obj: DataFrame, level, fill_value=None, sort: bool = True +) -> DataFrame: + assert isinstance(obj.index, MultiIndex) # checked by caller + unstacker = _Unstacker( + obj.index, level=level, constructor=obj._constructor, sort=sort + ) + + if not obj._can_fast_transpose: + mgr = obj._mgr.unstack(unstacker, fill_value=fill_value) + return obj._constructor_from_mgr(mgr, axes=mgr.axes) + else: + return unstacker.get_result( + obj._values, value_columns=obj.columns, fill_value=fill_value + ) + + +def _unstack_extension_series( + series: Series, level, fill_value, sort: bool +) -> DataFrame: + """ + Unstack an ExtensionArray-backed Series. + + The ExtensionDtype is preserved. + + Parameters + ---------- + series : Series + A Series with an ExtensionArray for values + level : Any + The level name or number. + fill_value : Any + The user-level (not physical storage) fill value to use for + missing values introduced by the reshape. Passed to + ``series.values.take``. + sort : bool + Whether to sort the resulting MuliIndex levels + + Returns + ------- + DataFrame + Each column of the DataFrame will have the same dtype as + the input Series. + """ + # Defer to the logic in ExtensionBlock._unstack + df = series.to_frame() + result = df.unstack(level=level, fill_value=fill_value, sort=sort) + + # equiv: result.droplevel(level=0, axis=1) + # but this avoids an extra copy + result.columns = result.columns._drop_level_numbers([0]) + return result + + +def stack(frame: DataFrame, level=-1, dropna: bool = True, sort: bool = True): + """ + Convert DataFrame to Series with multi-level Index. Columns become the + second level of the resulting hierarchical index + + Returns + ------- + stacked : Series or DataFrame + """ + + def stack_factorize(index): + if index.is_unique: + return index, np.arange(len(index)) + codes, categories = factorize_from_iterable(index) + return categories, codes + + N, K = frame.shape + + # Will also convert negative level numbers and check if out of bounds. + level_num = frame.columns._get_level_number(level) + + if isinstance(frame.columns, MultiIndex): + return _stack_multi_columns( + frame, level_num=level_num, dropna=dropna, sort=sort + ) + elif isinstance(frame.index, MultiIndex): + new_levels = list(frame.index.levels) + new_codes = [lab.repeat(K) for lab in frame.index.codes] + + clev, clab = stack_factorize(frame.columns) + new_levels.append(clev) + new_codes.append(np.tile(clab, N).ravel()) + + new_names = list(frame.index.names) + new_names.append(frame.columns.name) + new_index = MultiIndex( + levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False + ) + else: + levels, (ilab, clab) = zip(*map(stack_factorize, (frame.index, frame.columns))) + codes = ilab.repeat(K), np.tile(clab, N).ravel() + new_index = MultiIndex( + levels=levels, + codes=codes, + names=[frame.index.name, frame.columns.name], + verify_integrity=False, + ) + + new_values: ArrayLike + if not frame.empty and frame._is_homogeneous_type: + # For homogeneous EAs, frame._values will coerce to object. So + # we concatenate instead. 
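A short sketch of the homogeneous extension-dtype case described in the comment above (editor's illustration): concatenating the columns keeps the nullable Int64 dtype instead of falling back to object.

>>> df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64"),
...                    "b": pd.array([3, 4], dtype="Int64")})
>>> df.stack().dtype  # doctest: +SKIP
Int64Dtype()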
+ dtypes = list(frame.dtypes._values) + dtype = dtypes[0] + + if isinstance(dtype, ExtensionDtype): + arr = dtype.construct_array_type() + new_values = arr._concat_same_type( + [col._values for _, col in frame.items()] + ) + new_values = _reorder_for_extension_array_stack(new_values, N, K) + else: + # homogeneous, non-EA + new_values = frame._values.ravel() + + else: + # non-homogeneous + new_values = frame._values.ravel() + + if dropna: + mask = notna(new_values) + new_values = new_values[mask] + new_index = new_index[mask] + + return frame._constructor_sliced(new_values, index=new_index) + + +def stack_multiple(frame: DataFrame, level, dropna: bool = True, sort: bool = True): + # If all passed levels match up to column names, no + # ambiguity about what to do + if all(lev in frame.columns.names for lev in level): + result = frame + for lev in level: + result = stack(result, lev, dropna=dropna, sort=sort) + + # Otherwise, level numbers may change as each successive level is stacked + elif all(isinstance(lev, int) for lev in level): + # As each stack is done, the level numbers decrease, so we need + # to account for that when level is a sequence of ints + result = frame + # _get_level_number() checks level numbers are in range and converts + # negative numbers to positive + level = [frame.columns._get_level_number(lev) for lev in level] + + while level: + lev = level.pop(0) + result = stack(result, lev, dropna=dropna, sort=sort) + # Decrement all level numbers greater than current, as these + # have now shifted down by one + level = [v if v <= lev else v - 1 for v in level] + + else: + raise ValueError( + "level should contain all level names or all level " + "numbers, not a mixture of the two." + ) + + return result + + +def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex: + """Creates a MultiIndex from the first N-1 levels of this MultiIndex.""" + if len(columns.levels) <= 2: + return columns.levels[0]._rename(name=columns.names[0]) + + levs = [ + [lev[c] if c >= 0 else None for c in codes] + for lev, codes in zip(columns.levels[:-1], columns.codes[:-1]) + ] + + # Remove duplicate tuples in the MultiIndex. + tuples = zip(*levs) + unique_tuples = (key for key, _ in itertools.groupby(tuples)) + new_levs = zip(*unique_tuples) + + # The dtype of each level must be explicitly set to avoid inferring the wrong type. + # See GH-36991. + return MultiIndex.from_arrays( + [ + # Not all indices can accept None values. + Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev + for new_lev, lev in zip(new_levs, columns.levels) + ], + names=columns.names[:-1], + ) + + +def _stack_multi_columns( + frame: DataFrame, level_num: int = -1, dropna: bool = True, sort: bool = True +) -> DataFrame: + def _convert_level_number(level_num: int, columns: Index): + """ + Logic for converting the level number to something we can safely pass + to swaplevel. + + If `level_num` matches a column name return the name from + position `level_num`, otherwise return `level_num`. 
+ """ + if level_num in columns.names: + return columns.names[level_num] + + return level_num + + this = frame.copy(deep=False) + mi_cols = this.columns # cast(MultiIndex, this.columns) + assert isinstance(mi_cols, MultiIndex) # caller is responsible + + # this makes life much simpler + if level_num != mi_cols.nlevels - 1: + # roll levels to put selected level at end + roll_columns = mi_cols + for i in range(level_num, mi_cols.nlevels - 1): + # Need to check if the ints conflict with level names + lev1 = _convert_level_number(i, roll_columns) + lev2 = _convert_level_number(i + 1, roll_columns) + roll_columns = roll_columns.swaplevel(lev1, lev2) + this.columns = mi_cols = roll_columns + + if not mi_cols._is_lexsorted() and sort: + # Workaround the edge case where 0 is one of the column names, + # which interferes with trying to sort based on the first + # level + level_to_sort = _convert_level_number(0, mi_cols) + this = this.sort_index(level=level_to_sort, axis=1) + mi_cols = this.columns + + mi_cols = cast(MultiIndex, mi_cols) + new_columns = _stack_multi_column_index(mi_cols) + + # time to ravel the values + new_data = {} + level_vals = mi_cols.levels[-1] + level_codes = unique(mi_cols.codes[-1]) + if sort: + level_codes = np.sort(level_codes) + level_vals_nan = level_vals.insert(len(level_vals), None) + + level_vals_used = np.take(level_vals_nan, level_codes) + levsize = len(level_codes) + drop_cols = [] + for key in new_columns: + try: + loc = this.columns.get_loc(key) + except KeyError: + drop_cols.append(key) + continue + + # can make more efficient? + # we almost always return a slice + # but if unsorted can get a boolean + # indexer + if not isinstance(loc, slice): + slice_len = len(loc) + else: + slice_len = loc.stop - loc.start + + if slice_len != levsize: + chunk = this.loc[:, this.columns[loc]] + chunk.columns = level_vals_nan.take(chunk.columns.codes[-1]) + value_slice = chunk.reindex(columns=level_vals_used).values + else: + subset = this.iloc[:, loc] + dtype = find_common_type(subset.dtypes.tolist()) + if isinstance(dtype, ExtensionDtype): + # TODO(EA2D): won't need special case, can go through .values + # paths below (might change to ._values) + value_slice = dtype.construct_array_type()._concat_same_type( + [x._values.astype(dtype, copy=False) for _, x in subset.items()] + ) + N, K = subset.shape + idx = np.arange(N * K).reshape(K, N).T.ravel() + value_slice = value_slice.take(idx) + else: + value_slice = subset.values + + if value_slice.ndim > 1: + # i.e. not extension + value_slice = value_slice.ravel() + + new_data[key] = value_slice + + if len(drop_cols) > 0: + new_columns = new_columns.difference(drop_cols) + + N = len(this) + + if isinstance(this.index, MultiIndex): + new_levels = list(this.index.levels) + new_names = list(this.index.names) + new_codes = [lab.repeat(levsize) for lab in this.index.codes] + else: + old_codes, old_levels = factorize_from_iterable(this.index) + new_levels = [old_levels] + new_codes = [old_codes.repeat(levsize)] + new_names = [this.index.name] # something better? 
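What the per-column ravel/reindex loop above produces is easiest to see from the public API; stacking the innermost column level moves it into the row index while the remaining column levels survive. An editor-added example (shape only):

>>> mi_cols = pd.MultiIndex.from_product([["A", "B"], ["x", "y"]])
>>> df = pd.DataFrame([[1, 2, 3, 4]], columns=mi_cols)
>>> df.stack().shape  # doctest: +SKIP
(2, 2)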
+
+    new_levels.append(level_vals)
+    new_codes.append(np.tile(level_codes, N))
+    new_names.append(frame.columns.names[level_num])
+
+    new_index = MultiIndex(
+        levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False
+    )
+
+    result = frame._constructor(new_data, index=new_index, columns=new_columns)
+
+    if frame.columns.nlevels > 1:
+        desired_columns = frame.columns._drop_level_numbers([level_num]).unique()
+        if not result.columns.equals(desired_columns):
+            result = result[desired_columns]
+
+    # more efficient way to go about this? can do the whole masking biz but
+    # will only save a small amount of time...
+    if dropna:
+        result = result.dropna(axis=0, how="all")
+
+    return result
+
+
+def _reorder_for_extension_array_stack(
+    arr: ExtensionArray, n_rows: int, n_columns: int
+) -> ExtensionArray:
+    """
+    Re-orders the values when stacking multiple extension-arrays.
+
+    The indirect stacking method used for EAs requires a followup
+    take to get the order correct.
+
+    Parameters
+    ----------
+    arr : ExtensionArray
+    n_rows, n_columns : int
+        The number of rows and columns in the original DataFrame.
+
+    Returns
+    -------
+    taken : ExtensionArray
+        The original `arr` with elements re-ordered appropriately
+
+    Examples
+    --------
+    >>> arr = np.array(['a', 'b', 'c', 'd', 'e', 'f'])
+    >>> _reorder_for_extension_array_stack(arr, 2, 3)
+    array(['a', 'c', 'e', 'b', 'd', 'f'], dtype='<U1')
+
+    >>> _reorder_for_extension_array_stack(arr, 3, 2)
+    array(['a', 'd', 'b', 'e', 'c', 'f'], dtype='<U1')
+    """
+    # final take to get the order correct.
+    # idx is an indexer like
+    # [c0r0, c1r0, c2r0, ...,
+    #  c0r1, c1r1, c2r1, ...]
+    idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.ravel()
+    return arr.take(idx)
+
+
+def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
+    if frame.columns.nunique() != len(frame.columns):
+        raise ValueError("Columns with duplicate values are not supported in stack")
+
+    # If we need to drop `level` from columns, it needs to be in descending order
+    drop_levnums = sorted(level, reverse=True)
+    stack_cols = frame.columns._drop_level_numbers(
+        [k for k in range(frame.columns.nlevels) if k not in level][::-1]
+    )
+    if len(level) > 1:
+        # Arrange columns in the order we want to take them, e.g.
level=[2, 0, 1] + sorter = np.argsort(level) + ordered_stack_cols = stack_cols._reorder_ilevels(sorter) + else: + ordered_stack_cols = stack_cols + + stack_cols_unique = stack_cols.unique() + ordered_stack_cols_unique = ordered_stack_cols.unique() + + # Grab data for each unique index to be stacked + buf = [] + for idx in stack_cols_unique: + if len(frame.columns) == 1: + data = frame.copy() + else: + # Take the data from frame corresponding to this idx value + if len(level) == 1: + idx = (idx,) + gen = iter(idx) + column_indexer = tuple( + next(gen) if k in level else slice(None) + for k in range(frame.columns.nlevels) + ) + data = frame.loc[:, column_indexer] + + if len(level) < frame.columns.nlevels: + data.columns = data.columns._drop_level_numbers(drop_levnums) + elif stack_cols.nlevels == 1: + if data.ndim == 1: + data.name = 0 + else: + data.columns = RangeIndex(len(data.columns)) + buf.append(data) + + result: Series | DataFrame + if len(buf) > 0 and not frame.empty: + result = concat(buf) + ratio = len(result) // len(frame) + else: + # input is empty + if len(level) < frame.columns.nlevels: + # concat column order may be different from dropping the levels + new_columns = frame.columns._drop_level_numbers(drop_levnums).unique() + else: + new_columns = [0] + result = DataFrame(columns=new_columns, dtype=frame._values.dtype) + ratio = 0 + + if len(level) < frame.columns.nlevels: + # concat column order may be different from dropping the levels + desired_columns = frame.columns._drop_level_numbers(drop_levnums).unique() + if not result.columns.equals(desired_columns): + result = result[desired_columns] + + # Construct the correct MultiIndex by combining the frame's index and + # stacked columns. + index_levels: list | FrozenList + if isinstance(frame.index, MultiIndex): + index_levels = frame.index.levels + index_codes = list(np.tile(frame.index.codes, (1, ratio))) + else: + codes, uniques = factorize(frame.index, use_na_sentinel=False) + index_levels = [uniques] + index_codes = list(np.tile(codes, (1, ratio))) + if isinstance(stack_cols, MultiIndex): + column_levels = ordered_stack_cols.levels + column_codes = ordered_stack_cols.drop_duplicates().codes + else: + column_levels = [ordered_stack_cols.unique()] + column_codes = [factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0]] + column_codes = [np.repeat(codes, len(frame)) for codes in column_codes] + result.index = MultiIndex( + levels=index_levels + column_levels, + codes=index_codes + column_codes, + names=frame.index.names + list(ordered_stack_cols.names), + verify_integrity=False, + ) + + # sort result, but faster than calling sort_index since we know the order we need + len_df = len(frame) + n_uniques = len(ordered_stack_cols_unique) + indexer = np.arange(n_uniques) + idxs = np.tile(len_df * indexer, len_df) + np.repeat(np.arange(len_df), n_uniques) + result = result.take(idxs) + + # Reshape/rename if needed and dropna + if result.ndim == 2 and frame.columns.nlevels == len(level): + if len(result.columns) == 0: + result = Series(index=result.index) + else: + result = result.iloc[:, 0] + if result.ndim == 1: + result.name = None + + return result diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/tile.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/tile.py new file mode 100644 index 0000000000000000000000000000000000000000..2b0c6fbb8e3bf69fcb60ce3257450260af2a9f6b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/reshape/tile.py @@ -0,0 +1,638 @@ +""" +Quantilization 
functions and related stuff +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, +) + +import numpy as np + +from pandas._libs import ( + Timedelta, + Timestamp, + lib, +) + +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_bool_dtype, + is_integer, + is_list_like, + is_numeric_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, +) +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.missing import isna + +from pandas import ( + Categorical, + Index, + IntervalIndex, +) +import pandas.core.algorithms as algos +from pandas.core.arrays.datetimelike import dtype_to_unit + +if TYPE_CHECKING: + from pandas._typing import ( + DtypeObj, + IntervalLeftRight, + ) + + +def cut( + x, + bins, + right: bool = True, + labels=None, + retbins: bool = False, + precision: int = 3, + include_lowest: bool = False, + duplicates: str = "raise", + ordered: bool = True, +): + """ + Bin values into discrete intervals. + + Use `cut` when you need to segment and sort data values into bins. This + function is also useful for going from a continuous variable to a + categorical variable. For example, `cut` could convert ages to groups of + age ranges. Supports binning into an equal number of bins, or a + pre-specified array of bins. + + Parameters + ---------- + x : array-like + The input array to be binned. Must be 1-dimensional. + bins : int, sequence of scalars, or IntervalIndex + The criteria to bin by. + + * int : Defines the number of equal-width bins in the range of `x`. The + range of `x` is extended by .1% on each side to include the minimum + and maximum values of `x`. + * sequence of scalars : Defines the bin edges allowing for non-uniform + width. No extension of the range of `x` is done. + * IntervalIndex : Defines the exact bins to be used. Note that + IntervalIndex for `bins` must be non-overlapping. + + right : bool, default True + Indicates whether `bins` includes the rightmost edge or not. If + ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]`` + indicate (1,2], (2,3], (3,4]. This argument is ignored when + `bins` is an IntervalIndex. + labels : array or False, default None + Specifies the labels for the returned bins. Must be the same length as + the resulting bins. If False, returns only integer indicators of the + bins. This affects the type of the output container (see below). + This argument is ignored when `bins` is an IntervalIndex. If True, + raises an error. When `ordered=False`, labels must be provided. + retbins : bool, default False + Whether to return the bins or not. Useful when bins is provided + as a scalar. + precision : int, default 3 + The precision at which to store and display the bins labels. + include_lowest : bool, default False + Whether the first interval should be left-inclusive or not. + duplicates : {default 'raise', 'drop'}, optional + If bin edges are not unique, raise ValueError or drop non-uniques. + ordered : bool, default True + Whether the labels are ordered or not. Applies to returned types + Categorical and Series (with Categorical dtype). If True, + the resulting categorical will be ordered. If False, the resulting + categorical will be unordered (labels must be provided). + + Returns + ------- + out : Categorical, Series, or ndarray + An array-like object representing the respective bin for each value + of `x`. The type depends on the value of `labels`. 
+ + * None (default) : returns a Series for Series `x` or a + Categorical for all other inputs. The values stored within + are Interval dtype. + + * sequence of scalars : returns a Series for Series `x` or a + Categorical for all other inputs. The values stored within + are whatever the type in the sequence is. + + * False : returns an ndarray of integers. + + bins : numpy.ndarray or IntervalIndex. + The computed or specified bins. Only returned when `retbins=True`. + For scalar or sequence `bins`, this is an ndarray with the computed + bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For + an IntervalIndex `bins`, this is equal to `bins`. + + See Also + -------- + qcut : Discretize variable into equal-sized buckets based on rank + or based on sample quantiles. + Categorical : Array type for storing data that come from a + fixed set of values. + Series : One-dimensional array with axis labels (including time series). + IntervalIndex : Immutable Index implementing an ordered, sliceable set. + + Notes + ----- + Any NA values will be NA in the result. Out of bounds values will be NA in + the resulting Series or Categorical object. + + Reference :ref:`the user guide ` for more examples. + + Examples + -------- + Discretize into three equal-sized bins. + + >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3) + ... # doctest: +ELLIPSIS + [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... + Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ... + + >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True) + ... # doctest: +ELLIPSIS + ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ... + Categories (3, interval[float64, right]): [(0.994, 3.0] < (3.0, 5.0] ... + array([0.994, 3. , 5. , 7. ])) + + Discovers the same bins, but assign them specific labels. Notice that + the returned Categorical's categories are `labels` and is ordered. + + >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), + ... 3, labels=["bad", "medium", "good"]) + ['bad', 'good', 'medium', 'medium', 'good', 'bad'] + Categories (3, object): ['bad' < 'medium' < 'good'] + + ``ordered=False`` will result in unordered categories when labels are passed. + This parameter can be used to allow non-unique labels: + + >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, + ... labels=["B", "A", "B"], ordered=False) + ['B', 'B', 'A', 'A', 'B', 'B'] + Categories (2, object): ['A', 'B'] + + ``labels=False`` implies you just want the bins back. + + >>> pd.cut([0, 1, 1, 2], bins=4, labels=False) + array([0, 1, 1, 3]) + + Passing a Series as an input returns a Series with categorical dtype: + + >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), + ... index=['a', 'b', 'c', 'd', 'e']) + >>> pd.cut(s, 3) + ... # doctest: +ELLIPSIS + a (1.992, 4.667] + b (1.992, 4.667] + c (4.667, 7.333] + d (7.333, 10.0] + e (7.333, 10.0] + dtype: category + Categories (3, interval[float64, right]): [(1.992, 4.667] < (4.667, ... + + Passing a Series as an input returns a Series with mapping value. + It is used to map numerically to intervals based on bins. + + >>> s = pd.Series(np.array([2, 4, 6, 8, 10]), + ... index=['a', 'b', 'c', 'd', 'e']) + >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False) + ... # doctest: +ELLIPSIS + (a 1.0 + b 2.0 + c 3.0 + d 4.0 + e NaN + dtype: float64, + array([ 0, 2, 4, 6, 8, 10])) + + Use `drop` optional when bins is not unique + + >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True, + ... right=False, duplicates='drop') + ... 
# doctest: +ELLIPSIS + (a 1.0 + b 2.0 + c 3.0 + d 3.0 + e NaN + dtype: float64, + array([ 0, 2, 4, 6, 10])) + + Passing an IntervalIndex for `bins` results in those categories exactly. + Notice that values not covered by the IntervalIndex are set to NaN. 0 + is to the left of the first bin (which is closed on the right), and 1.5 + falls between two bins. + + >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins) + [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]] + Categories (3, interval[int64, right]): [(0, 1] < (2, 3] < (4, 5]] + """ + # NOTE: this binning code is changed a bit from histogram for var(x) == 0 + + original = x + x_idx = _preprocess_for_cut(x) + x_idx, _ = _coerce_to_type(x_idx) + + if not np.iterable(bins): + bins = _nbins_to_bins(x_idx, bins, right) + + elif isinstance(bins, IntervalIndex): + if bins.is_overlapping: + raise ValueError("Overlapping IntervalIndex is not accepted.") + + else: + bins = Index(bins) + if not bins.is_monotonic_increasing: + raise ValueError("bins must increase monotonically.") + + fac, bins = _bins_to_cuts( + x_idx, + bins, + right=right, + labels=labels, + precision=precision, + include_lowest=include_lowest, + duplicates=duplicates, + ordered=ordered, + ) + + return _postprocess_for_cut(fac, bins, retbins, original) + + +def qcut( + x, + q, + labels=None, + retbins: bool = False, + precision: int = 3, + duplicates: str = "raise", +): + """ + Quantile-based discretization function. + + Discretize variable into equal-sized buckets based on rank or based + on sample quantiles. For example 1000 values for 10 quantiles would + produce a Categorical object indicating quantile membership for each data point. + + Parameters + ---------- + x : 1d ndarray or Series + q : int or list-like of float + Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately + array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles. + labels : array or False, default None + Used as labels for the resulting bins. Must be of the same length as + the resulting bins. If False, return only integer indicators of the + bins. If True, raises an error. + retbins : bool, optional + Whether to return the (bins, labels) or not. Can be useful if bins + is given as a scalar. + precision : int, optional + The precision at which to store and display the bins labels. + duplicates : {default 'raise', 'drop'}, optional + If bin edges are not unique, raise ValueError or drop non-uniques. + + Returns + ------- + out : Categorical or Series or array of integers if labels is False + The return type (Categorical or Series) depends on the input: a Series + of type category if input is a Series else Categorical. Bins are + represented as categories when categorical data is returned. + bins : ndarray of floats + Returned only if `retbins` is True. + + Notes + ----- + Out of bounds values will be NA in the resulting Categorical object + + Examples + -------- + >>> pd.qcut(range(5), 4) + ... # doctest: +ELLIPSIS + [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]] + Categories (4, interval[float64, right]): [(-0.001, 1.0] < (1.0, 2.0] ... + + >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"]) + ... 
# doctest: +SKIP + [good, good, medium, bad, bad] + Categories (3, object): [good < medium < bad] + + >>> pd.qcut(range(5), 4, labels=False) + array([0, 0, 1, 2, 3]) + """ + original = x + x_idx = _preprocess_for_cut(x) + x_idx, _ = _coerce_to_type(x_idx) + + quantiles = np.linspace(0, 1, q + 1) if is_integer(q) else q + + bins = x_idx.to_series().dropna().quantile(quantiles) + + fac, bins = _bins_to_cuts( + x_idx, + Index(bins), + labels=labels, + precision=precision, + include_lowest=True, + duplicates=duplicates, + ) + + return _postprocess_for_cut(fac, bins, retbins, original) + + +def _nbins_to_bins(x_idx: Index, nbins: int, right: bool) -> Index: + """ + If a user passed an integer N for bins, convert this to a sequence of N + equal(ish)-sized bins. + """ + if is_scalar(nbins) and nbins < 1: + raise ValueError("`bins` should be a positive integer.") + + if x_idx.size == 0: + raise ValueError("Cannot cut empty array") + + rng = (x_idx.min(), x_idx.max()) + mn, mx = rng + + if is_numeric_dtype(x_idx.dtype) and (np.isinf(mn) or np.isinf(mx)): + # GH#24314 + raise ValueError( + "cannot specify integer `bins` when input data contains infinity" + ) + + if mn == mx: # adjust end points before binning + if _is_dt_or_td(x_idx.dtype): + # using seconds=1 is pretty arbitrary here + # error: Argument 1 to "dtype_to_unit" has incompatible type + # "dtype[Any] | ExtensionDtype"; expected "DatetimeTZDtype | dtype[Any]" + unit = dtype_to_unit(x_idx.dtype) # type: ignore[arg-type] + td = Timedelta(seconds=1).as_unit(unit) + # Use DatetimeArray/TimedeltaArray method instead of linspace + # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]" + # has no attribute "_generate_range" + bins = x_idx._values._generate_range( # type: ignore[union-attr] + start=mn - td, end=mx + td, periods=nbins + 1, freq=None, unit=unit + ) + else: + mn -= 0.001 * abs(mn) if mn != 0 else 0.001 + mx += 0.001 * abs(mx) if mx != 0 else 0.001 + + bins = np.linspace(mn, mx, nbins + 1, endpoint=True) + else: # adjust end points after binning + if _is_dt_or_td(x_idx.dtype): + # Use DatetimeArray/TimedeltaArray method instead of linspace + + # error: Argument 1 to "dtype_to_unit" has incompatible type + # "dtype[Any] | ExtensionDtype"; expected "DatetimeTZDtype | dtype[Any]" + unit = dtype_to_unit(x_idx.dtype) # type: ignore[arg-type] + # error: Item "ExtensionArray" of "ExtensionArray | ndarray[Any, Any]" + # has no attribute "_generate_range" + bins = x_idx._values._generate_range( # type: ignore[union-attr] + start=mn, end=mx, periods=nbins + 1, freq=None, unit=unit + ) + else: + bins = np.linspace(mn, mx, nbins + 1, endpoint=True) + adj = (mx - mn) * 0.001 # 0.1% of the range + if right: + bins[0] -= adj + else: + bins[-1] += adj + + return Index(bins) + + +def _bins_to_cuts( + x_idx: Index, + bins: Index, + right: bool = True, + labels=None, + precision: int = 3, + include_lowest: bool = False, + duplicates: str = "raise", + ordered: bool = True, +): + if not ordered and labels is None: + raise ValueError("'labels' must be provided if 'ordered = False'") + + if duplicates not in ["raise", "drop"]: + raise ValueError( + "invalid value for 'duplicates' parameter, valid options are: raise, drop" + ) + + result: Categorical | np.ndarray + + if isinstance(bins, IntervalIndex): + # we have a fast-path here + ids = bins.get_indexer(x_idx) + cat_dtype = CategoricalDtype(bins, ordered=True) + result = Categorical.from_codes(ids, dtype=cat_dtype, validate=False) + return result, bins + + unique_bins = algos.unique(bins) 
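The duplicate-edge handling that follows is most often reached through qcut on data with few distinct values: with duplicates="drop" the repeated quantile edges are removed, leaving fewer bins than requested (here a single bin). An editor-added illustration (outputs omitted):

>>> pd.qcut([0, 0, 0, 0, 1], q=4)  # doctest: +SKIP
ValueError: Bin edges must be unique: ...
>>> pd.qcut([0, 0, 0, 0, 1], q=4, duplicates="drop")  # doctest: +SKIP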
+ if len(unique_bins) < len(bins) and len(bins) != 2: + if duplicates == "raise": + raise ValueError( + f"Bin edges must be unique: {repr(bins)}.\n" + f"You can drop duplicate edges by setting the 'duplicates' kwarg" + ) + bins = unique_bins + + side: Literal["left", "right"] = "left" if right else "right" + + try: + ids = bins.searchsorted(x_idx, side=side) + except TypeError as err: + # e.g. test_datetime_nan_error if bins are DatetimeArray and x_idx + # is integers + if x_idx.dtype.kind == "m": + raise ValueError("bins must be of timedelta64 dtype") from err + elif x_idx.dtype.kind == bins.dtype.kind == "M": + raise ValueError( + "Cannot use timezone-naive bins with timezone-aware values, " + "or vice-versa" + ) from err + elif x_idx.dtype.kind == "M": + raise ValueError("bins must be of datetime64 dtype") from err + else: + raise + ids = ensure_platform_int(ids) + + if include_lowest: + ids[x_idx == bins[0]] = 1 + + na_mask = isna(x_idx) | (ids == len(bins)) | (ids == 0) + has_nas = na_mask.any() + + if labels is not False: + if not (labels is None or is_list_like(labels)): + raise ValueError( + "Bin labels must either be False, None or passed in as a " + "list-like argument" + ) + + if labels is None: + labels = _format_labels( + bins, precision, right=right, include_lowest=include_lowest + ) + elif ordered and len(set(labels)) != len(labels): + raise ValueError( + "labels must be unique if ordered=True; pass ordered=False " + "for duplicate labels" + ) + else: + if len(labels) != len(bins) - 1: + raise ValueError( + "Bin labels must be one fewer than the number of bin edges" + ) + + if not isinstance(getattr(labels, "dtype", None), CategoricalDtype): + labels = Categorical( + labels, + categories=labels if len(set(labels)) == len(labels) else None, + ordered=ordered, + ) + # TODO: handle mismatch between categorical label order and pandas.cut order. + np.putmask(ids, na_mask, 0) + result = algos.take_nd(labels, ids - 1) + + else: + result = ids - 1 + if has_nas: + result = result.astype(np.float64) + np.putmask(result, na_mask, np.nan) + + return result, bins + + +def _coerce_to_type(x: Index) -> tuple[Index, DtypeObj | None]: + """ + if the passed data is of datetime/timedelta, bool or nullable int type, + this method converts it to numeric so that cut or qcut method can + handle it + """ + dtype: DtypeObj | None = None + + if _is_dt_or_td(x.dtype): + dtype = x.dtype + elif is_bool_dtype(x.dtype): + # GH 20303 + x = x.astype(np.int64) + # To support cut and qcut for IntegerArray we convert to float dtype. + # Will properly support in the future. + # https://github.com/pandas-dev/pandas/pull/31290 + # https://github.com/pandas-dev/pandas/issues/31389 + elif isinstance(x.dtype, ExtensionDtype) and is_numeric_dtype(x.dtype): + x_arr = x.to_numpy(dtype=np.float64, na_value=np.nan) + x = Index(x_arr) + + return Index(x), dtype + + +def _is_dt_or_td(dtype: DtypeObj) -> bool: + # Note: the dtype here comes from an Index.dtype, so we know that that any + # dt64/td64 dtype is of a supported unit. 
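A brief note on the coercion above (editor's addition): boolean and nullable-numeric inputs are viewed as plain NumPy numerics before binning, so the resulting interval edges are floats.

>>> s = pd.Series([1, 2, 3, 4], dtype="Int64")  # nullable integer input
>>> pd.cut(s, bins=2).cat.categories.dtype  # doctest: +SKIP
interval[float64, right]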
+ return isinstance(dtype, DatetimeTZDtype) or lib.is_np_dtype(dtype, "mM") + + +def _format_labels( + bins: Index, + precision: int, + right: bool = True, + include_lowest: bool = False, +): + """based on the dtype, return our labels""" + closed: IntervalLeftRight = "right" if right else "left" + + formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta] + + if _is_dt_or_td(bins.dtype): + # error: Argument 1 to "dtype_to_unit" has incompatible type + # "dtype[Any] | ExtensionDtype"; expected "DatetimeTZDtype | dtype[Any]" + unit = dtype_to_unit(bins.dtype) # type: ignore[arg-type] + formatter = lambda x: x + adjust = lambda x: x - Timedelta(1, unit=unit).as_unit(unit) + else: + precision = _infer_precision(precision, bins) + formatter = lambda x: _round_frac(x, precision) + adjust = lambda x: x - 10 ** (-precision) + + breaks = [formatter(b) for b in bins] + if right and include_lowest: + # adjust lhs of first interval by precision to account for being right closed + breaks[0] = adjust(breaks[0]) + + if _is_dt_or_td(bins.dtype): + # error: "Index" has no attribute "as_unit" + breaks = type(bins)(breaks).as_unit(unit) # type: ignore[attr-defined] + + return IntervalIndex.from_breaks(breaks, closed=closed) + + +def _preprocess_for_cut(x) -> Index: + """ + handles preprocessing for cut where we convert passed + input to array, strip the index information and store it + separately + """ + # Check that the passed array is a Pandas or Numpy object + # We don't want to strip away a Pandas data-type here (e.g. datetimetz) + ndim = getattr(x, "ndim", None) + if ndim is None: + x = np.asarray(x) + if x.ndim != 1: + raise ValueError("Input array must be 1 dimensional") + + return Index(x) + + +def _postprocess_for_cut(fac, bins, retbins: bool, original): + """ + handles post processing for the cut method where + we combine the index information if the originally passed + datatype was a series + """ + if isinstance(original, ABCSeries): + fac = original._constructor(fac, index=original.index, name=original.name) + + if not retbins: + return fac + + if isinstance(bins, Index) and is_numeric_dtype(bins.dtype): + bins = bins._values + + return fac, bins + + +def _round_frac(x, precision: int): + """ + Round the fractional part of the given number + """ + if not np.isfinite(x) or x == 0: + return x + else: + frac, whole = np.modf(x) + if whole == 0: + digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision + else: + digits = precision + return np.around(x, digits) + + +def _infer_precision(base_precision: int, bins: Index) -> int: + """ + Infer an appropriate precision for _round_frac + """ + for precision in range(base_precision, 20): + levels = np.asarray([_round_frac(b, precision) for b in bins]) + if algos.unique(levels).size == bins.size: + return precision + return base_precision # default diff --git a/venv/lib/python3.10/site-packages/pandas/core/reshape/util.py b/venv/lib/python3.10/site-packages/pandas/core/reshape/util.py new file mode 100644 index 0000000000000000000000000000000000000000..476e3922b6989e4267aeeafd5943a80c1599b1d5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/reshape/util.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.common import is_list_like + +if TYPE_CHECKING: + from pandas._typing import NumpyIndexT + + +def cartesian_product(X) -> list[np.ndarray]: + """ + Numpy version of itertools.product. + Sometimes faster (for large inputs)... 
+
+    Parameters
+    ----------
+    X : list-like of list-likes
+
+    Returns
+    -------
+    product : list of ndarrays
+
+    Examples
+    --------
+    >>> cartesian_product([list('ABC'), [1, 2]])
+    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='<U1'), array([1, 2, 1, 2, 1, 2])]
+    """
+    msg = "Input must be a list-like of list-likes"
+    if not is_list_like(X):
+        raise TypeError(msg)
+    for x in X:
+        if not is_list_like(x):
+            raise TypeError(msg)
+
+    if len(X) == 0:
+        return []
+
+    lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
+    cumprodX = np.cumprod(lenX)
+
+    if np.any(cumprodX < 0):
+        raise ValueError("Product space too large to allocate arrays!")
+
+    a = np.roll(cumprodX, 1)
+    a[0] = 1
+
+    if cumprodX[-1] != 0:
+        b = cumprodX[-1] / cumprodX
+    else:
+        # if any factor is empty, the cartesian product is empty
+        b = np.zeros_like(cumprodX)
+
+    return [
+        tile_compat(np.repeat(x, b[i]), np.prod(a[i]))
+        for i, x in enumerate(X)
+    ]
+
+
+def tile_compat(arr: NumpyIndexT, num: int) -> NumpyIndexT:
+    """
+    Index compat for np.tile.
+
+    Notes
+    -----
+    Does not support multi-dimensional `num`.
+    """
+    if isinstance(arr, np.ndarray):
+        return np.tile(arr, num)
+
+    # Otherwise we have an Index
+    taker = np.tile(np.arange(len(arr)), num)
+    return arr.take(taker)
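A closing usage sketch for the two helpers above (editor's addition; these are private helpers, so the import path is an implementation detail that may change between pandas versions):

>>> from pandas.core.reshape.util import cartesian_product, tile_compat
>>> cartesian_product([["a", "b"], [1, 2, 3]])
[array(['a', 'a', 'a', 'b', 'b', 'b'], dtype='<U1'), array([1, 2, 3, 1, 2, 3])]
>>> tile_compat(pd.Index(["x", "y"]), 2)
Index(['x', 'y', 'x', 'y'], dtype='object')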