diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/_numba/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9254f8ef89b629994237e89f79af06a8673b2345 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b663f38ba520dc49ab88e569551c5fef1ad64963 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/executor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90e0844b109f36b96469cc2047ace8c45a1409a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/_numba/__pycache__/extensions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1116c61c4ca8e48d94a6c9c6222aaf545d989e86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/__init__.py @@ -0,0 +1,27 @@ +from pandas.core._numba.kernels.mean_ import ( + grouped_mean, + sliding_mean, +) +from pandas.core._numba.kernels.min_max_ import ( + grouped_min_max, + sliding_min_max, +) +from pandas.core._numba.kernels.sum_ import ( + grouped_sum, + sliding_sum, +) +from pandas.core._numba.kernels.var_ import ( + grouped_var, + sliding_var, +) + +__all__ = [ + "sliding_mean", + "grouped_mean", + "sliding_sum", + "grouped_sum", + "sliding_var", + "grouped_var", + "sliding_min_max", + "grouped_min_max", +] diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py new file mode 100644 index 0000000000000000000000000000000000000000..f415804781753372a5715b6ffee6a7ab8cc70b64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/mean_.py @@ -0,0 +1,196 @@ +""" +Numba 1D mean kernels that can be shared by +* Dataframe / Series +* groupby +* rolling / expanding + +Mirrors pandas/_libs/window/aggregation.pyx +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numba +import numpy as np + +from pandas.core._numba.kernels.shared import is_monotonic_increasing +from pandas.core._numba.kernels.sum_ import grouped_kahan_sum + +if TYPE_CHECKING: + from pandas._typing import npt + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def add_mean( + val: float, + nobs: int, + sum_x: float, + neg_ct: int, + compensation: float, + num_consecutive_same_value: int, + prev_value: float, +) -> tuple[int, float, int, float, int, float]: + if not np.isnan(val): + nobs 
+= 1 + y = val - compensation + t = sum_x + y + compensation = t - sum_x - y + sum_x = t + if val < 0: + neg_ct += 1 + + if val == prev_value: + num_consecutive_same_value += 1 + else: + num_consecutive_same_value = 1 + prev_value = val + + return nobs, sum_x, neg_ct, compensation, num_consecutive_same_value, prev_value + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def remove_mean( + val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float +) -> tuple[int, float, int, float]: + if not np.isnan(val): + nobs -= 1 + y = -val - compensation + t = sum_x + y + compensation = t - sum_x - y + sum_x = t + if val < 0: + neg_ct -= 1 + return nobs, sum_x, neg_ct, compensation + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def sliding_mean( + values: np.ndarray, + result_dtype: np.dtype, + start: np.ndarray, + end: np.ndarray, + min_periods: int, +) -> tuple[np.ndarray, list[int]]: + N = len(start) + nobs = 0 + sum_x = 0.0 + neg_ct = 0 + compensation_add = 0.0 + compensation_remove = 0.0 + + is_monotonic_increasing_bounds = is_monotonic_increasing( + start + ) and is_monotonic_increasing(end) + + output = np.empty(N, dtype=result_dtype) + + for i in range(N): + s = start[i] + e = end[i] + if i == 0 or not is_monotonic_increasing_bounds: + prev_value = values[s] + num_consecutive_same_value = 0 + + for j in range(s, e): + val = values[j] + ( + nobs, + sum_x, + neg_ct, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_mean( + val, + nobs, + sum_x, + neg_ct, + compensation_add, + num_consecutive_same_value, + prev_value, # pyright: ignore[reportGeneralTypeIssues] + ) + else: + for j in range(start[i - 1], s): + val = values[j] + nobs, sum_x, neg_ct, compensation_remove = remove_mean( + val, nobs, sum_x, neg_ct, compensation_remove + ) + + for j in range(end[i - 1], e): + val = values[j] + ( + nobs, + sum_x, + neg_ct, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_mean( + val, + nobs, + sum_x, + neg_ct, + compensation_add, + num_consecutive_same_value, + prev_value, # pyright: ignore[reportGeneralTypeIssues] + ) + + if nobs >= min_periods and nobs > 0: + result = sum_x / nobs + if num_consecutive_same_value >= nobs: + result = prev_value + elif neg_ct == 0 and result < 0: + result = 0 + elif neg_ct == nobs and result > 0: + result = 0 + else: + result = np.nan + + output[i] = result + + if not is_monotonic_increasing_bounds: + nobs = 0 + sum_x = 0.0 + neg_ct = 0 + compensation_remove = 0.0 + + # na_position is empty list since float64 can already hold nans + # Do list comprehension, since numba cannot figure out that na_pos is + # empty list of ints on its own + na_pos = [0 for i in range(0)] + return output, na_pos + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def grouped_mean( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, + min_periods: int, +) -> tuple[np.ndarray, list[int]]: + output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum( + values, result_dtype, labels, ngroups + ) + + # Post-processing, replace sums that don't satisfy min_periods + for lab in range(ngroups): + nobs = nobs_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + prev_value = prev_vals[lab] + sum_x = output[lab] + if nobs >= min_periods: + if num_consecutive_same_value >= nobs: + result = prev_value * nobs + else: + result = sum_x + else: + result = np.nan + result /= nobs + output[lab] = result + + # na_position is empty list since float64 
can already hold nans + # Do list comprehension, since numba cannot figure out that na_pos is + # empty list of ints on its own + na_pos = [0 for i in range(0)] + return output, na_pos diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py new file mode 100644 index 0000000000000000000000000000000000000000..c9803980e64a6bf2ec5b274acd582850e3a07420 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/min_max_.py @@ -0,0 +1,125 @@ +""" +Numba 1D min/max kernels that can be shared by +* Dataframe / Series +* groupby +* rolling / expanding + +Mirrors pandas/_libs/window/aggregation.pyx +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numba +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def sliding_min_max( + values: np.ndarray, + result_dtype: np.dtype, + start: np.ndarray, + end: np.ndarray, + min_periods: int, + is_max: bool, +) -> tuple[np.ndarray, list[int]]: + N = len(start) + nobs = 0 + output = np.empty(N, dtype=result_dtype) + na_pos = [] + # Use deque once numba supports it + # https://github.com/numba/numba/issues/7417 + Q: list = [] + W: list = [] + for i in range(N): + curr_win_size = end[i] - start[i] + if i == 0: + st = start[i] + else: + st = end[i - 1] + + for k in range(st, end[i]): + ai = values[k] + if not np.isnan(ai): + nobs += 1 + elif is_max: + ai = -np.inf + else: + ai = np.inf + # Discard previous entries if we find new min or max + if is_max: + while Q and ((ai >= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]): + Q.pop() + else: + while Q and ((ai <= values[Q[-1]]) or values[Q[-1]] != values[Q[-1]]): + Q.pop() + Q.append(k) + W.append(k) + + # Discard entries outside and left of current window + while Q and Q[0] <= start[i] - 1: + Q.pop(0) + while W and W[0] <= start[i] - 1: + if not np.isnan(values[W[0]]): + nobs -= 1 + W.pop(0) + + # Save output based on index in input value array + if Q and curr_win_size > 0 and nobs >= min_periods: + output[i] = values[Q[0]] + else: + if values.dtype.kind != "i": + output[i] = np.nan + else: + na_pos.append(i) + + return output, na_pos + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def grouped_min_max( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, + min_periods: int, + is_max: bool, +) -> tuple[np.ndarray, list[int]]: + N = len(labels) + nobs = np.zeros(ngroups, dtype=np.int64) + na_pos = [] + output = np.empty(ngroups, dtype=result_dtype) + + for i in range(N): + lab = labels[i] + val = values[i] + if lab < 0: + continue + + if values.dtype.kind == "i" or not np.isnan(val): + nobs[lab] += 1 + else: + # NaN value cannot be a min/max value + continue + + if nobs[lab] == 1: + # First element in group, set output equal to this + output[lab] = val + continue + + if is_max: + if val > output[lab]: + output[lab] = val + else: + if val < output[lab]: + output[lab] = val + + # Set labels that don't satisfy min_periods as np.nan + for lab, count in enumerate(nobs): + if count < min_periods: + na_pos.append(lab) + + return output, na_pos diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/shared.py b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/shared.py new file mode 100644 index 0000000000000000000000000000000000000000..c52372fe6b08f3ec9ec6e836341e08ac804d50f3 --- /dev/null +++ 
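# Illustrative sketch, not part of the vendored files above: one way the sliding
# window kernels just added (e.g. sliding_mean) could be exercised directly.
# Assumes numba is installed; pandas itself reaches these private kernels through
# its rolling/groupby Numba executor, so calling them by hand is exposition only.
import numpy as np
from pandas.core._numba.kernels import sliding_mean

values = np.array([1.0, 2.0, np.nan, 4.0, 5.0])
# start/end describe a trailing window of length 2 over the five values,
# i.e. the offsets a rolling(window=2) indexer would hand to the kernel.
start = np.array([0, 0, 1, 2, 3], dtype=np.int64)
end = np.array([1, 2, 3, 4, 5], dtype=np.int64)

# pandas passes a numpy scalar type (np.float64) as result_dtype for float input.
result, na_pos = sliding_mean(values, np.float64, start, end, 1)
# result -> [1.0, 1.5, 2.0, 4.0, 4.5]; na_pos stays empty because a float64
# output can hold NaN directly (see the comment at the end of sliding_mean).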
b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/shared.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numba + +if TYPE_CHECKING: + import numpy as np + + +@numba.jit( + # error: Any? not callable + numba.boolean(numba.int64[:]), # type: ignore[misc] + nopython=True, + nogil=True, + parallel=False, +) +def is_monotonic_increasing(bounds: np.ndarray) -> bool: + """Check if int64 values are monotonically increasing.""" + n = len(bounds) + if n < 2: + return True + prev = bounds[0] + for i in range(1, n): + cur = bounds[i] + if cur < prev: + return False + prev = cur + return True diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/sum_.py b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/sum_.py new file mode 100644 index 0000000000000000000000000000000000000000..94db84267ceecb83234fc2e0b231566a2fdffd66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/sum_.py @@ -0,0 +1,244 @@ +""" +Numba 1D sum kernels that can be shared by +* Dataframe / Series +* groupby +* rolling / expanding + +Mirrors pandas/_libs/window/aggregation.pyx +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +import numba +from numba.extending import register_jitable +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + +from pandas.core._numba.kernels.shared import is_monotonic_increasing + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def add_sum( + val: Any, + nobs: int, + sum_x: Any, + compensation: Any, + num_consecutive_same_value: int, + prev_value: Any, +) -> tuple[int, Any, Any, int, Any]: + if not np.isnan(val): + nobs += 1 + y = val - compensation + t = sum_x + y + compensation = t - sum_x - y + sum_x = t + + if val == prev_value: + num_consecutive_same_value += 1 + else: + num_consecutive_same_value = 1 + prev_value = val + + return nobs, sum_x, compensation, num_consecutive_same_value, prev_value + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def remove_sum( + val: Any, nobs: int, sum_x: Any, compensation: Any +) -> tuple[int, Any, Any]: + if not np.isnan(val): + nobs -= 1 + y = -val - compensation + t = sum_x + y + compensation = t - sum_x - y + sum_x = t + return nobs, sum_x, compensation + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def sliding_sum( + values: np.ndarray, + result_dtype: np.dtype, + start: np.ndarray, + end: np.ndarray, + min_periods: int, +) -> tuple[np.ndarray, list[int]]: + dtype = values.dtype + + na_val: object = np.nan + if dtype.kind == "i": + na_val = 0 + + N = len(start) + nobs = 0 + sum_x = 0 + compensation_add = 0 + compensation_remove = 0 + na_pos = [] + + is_monotonic_increasing_bounds = is_monotonic_increasing( + start + ) and is_monotonic_increasing(end) + + output = np.empty(N, dtype=result_dtype) + + for i in range(N): + s = start[i] + e = end[i] + if i == 0 or not is_monotonic_increasing_bounds: + prev_value = values[s] + num_consecutive_same_value = 0 + + for j in range(s, e): + val = values[j] + ( + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_sum( + val, + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + else: + for j in range(start[i - 1], s): + val = values[j] + nobs, sum_x, compensation_remove = remove_sum( + val, nobs, sum_x, compensation_remove + ) + + for j in range(end[i - 1], e): + val = values[j] + ( + nobs, + sum_x, + compensation_add, + 
num_consecutive_same_value, + prev_value, + ) = add_sum( + val, + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + + if nobs == 0 == min_periods: + result: object = 0 + elif nobs >= min_periods: + if num_consecutive_same_value >= nobs: + result = prev_value * nobs + else: + result = sum_x + else: + result = na_val + if dtype.kind == "i": + na_pos.append(i) + + output[i] = result + + if not is_monotonic_increasing_bounds: + nobs = 0 + sum_x = 0 + compensation_remove = 0 + + return output, na_pos + + +# Mypy/pyright don't like the fact that the decorator is untyped +@register_jitable # type: ignore[misc] +def grouped_kahan_sum( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, +) -> tuple[ + np.ndarray, npt.NDArray[np.int64], np.ndarray, npt.NDArray[np.int64], np.ndarray +]: + N = len(labels) + + nobs_arr = np.zeros(ngroups, dtype=np.int64) + comp_arr = np.zeros(ngroups, dtype=values.dtype) + consecutive_counts = np.zeros(ngroups, dtype=np.int64) + prev_vals = np.zeros(ngroups, dtype=values.dtype) + output = np.zeros(ngroups, dtype=result_dtype) + + for i in range(N): + lab = labels[i] + val = values[i] + + if lab < 0: + continue + + sum_x = output[lab] + nobs = nobs_arr[lab] + compensation_add = comp_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + prev_value = prev_vals[lab] + + ( + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_sum( + val, + nobs, + sum_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + + output[lab] = sum_x + consecutive_counts[lab] = num_consecutive_same_value + prev_vals[lab] = prev_value + comp_arr[lab] = compensation_add + nobs_arr[lab] = nobs + return output, nobs_arr, comp_arr, consecutive_counts, prev_vals + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def grouped_sum( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, + min_periods: int, +) -> tuple[np.ndarray, list[int]]: + na_pos = [] + + output, nobs_arr, comp_arr, consecutive_counts, prev_vals = grouped_kahan_sum( + values, result_dtype, labels, ngroups + ) + + # Post-processing, replace sums that don't satisfy min_periods + for lab in range(ngroups): + nobs = nobs_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + prev_value = prev_vals[lab] + sum_x = output[lab] + if nobs >= min_periods: + if num_consecutive_same_value >= nobs: + result = prev_value * nobs + else: + result = sum_x + else: + result = sum_x # Don't change val, will be replaced by nan later + na_pos.append(lab) + output[lab] = result + + return output, na_pos diff --git a/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py new file mode 100644 index 0000000000000000000000000000000000000000..c63d0b90b0fc3f4a4a532b9ce85e57d8bd823fb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/_numba/kernels/var_.py @@ -0,0 +1,245 @@ +""" +Numba 1D var kernels that can be shared by +* Dataframe / Series +* groupby +* rolling / expanding + +Mirrors pandas/_libs/window/aggregation.pyx +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numba +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + +from pandas.core._numba.kernels.shared import is_monotonic_increasing + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def add_var( + val: float, + nobs: int, + 
mean_x: float, + ssqdm_x: float, + compensation: float, + num_consecutive_same_value: int, + prev_value: float, +) -> tuple[int, float, float, float, int, float]: + if not np.isnan(val): + if val == prev_value: + num_consecutive_same_value += 1 + else: + num_consecutive_same_value = 1 + prev_value = val + + nobs += 1 + prev_mean = mean_x - compensation + y = val - compensation + t = y - mean_x + compensation = t + mean_x - y + delta = t + if nobs: + mean_x += delta / nobs + else: + mean_x = 0 + ssqdm_x += (val - prev_mean) * (val - mean_x) + return nobs, mean_x, ssqdm_x, compensation, num_consecutive_same_value, prev_value + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def remove_var( + val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float +) -> tuple[int, float, float, float]: + if not np.isnan(val): + nobs -= 1 + if nobs: + prev_mean = mean_x - compensation + y = val - compensation + t = y - mean_x + compensation = t + mean_x - y + delta = t + mean_x -= delta / nobs + ssqdm_x -= (val - prev_mean) * (val - mean_x) + else: + mean_x = 0 + ssqdm_x = 0 + return nobs, mean_x, ssqdm_x, compensation + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def sliding_var( + values: np.ndarray, + result_dtype: np.dtype, + start: np.ndarray, + end: np.ndarray, + min_periods: int, + ddof: int = 1, +) -> tuple[np.ndarray, list[int]]: + N = len(start) + nobs = 0 + mean_x = 0.0 + ssqdm_x = 0.0 + compensation_add = 0.0 + compensation_remove = 0.0 + + min_periods = max(min_periods, 1) + is_monotonic_increasing_bounds = is_monotonic_increasing( + start + ) and is_monotonic_increasing(end) + + output = np.empty(N, dtype=result_dtype) + + for i in range(N): + s = start[i] + e = end[i] + if i == 0 or not is_monotonic_increasing_bounds: + prev_value = values[s] + num_consecutive_same_value = 0 + + for j in range(s, e): + val = values[j] + ( + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_var( + val, + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + else: + for j in range(start[i - 1], s): + val = values[j] + nobs, mean_x, ssqdm_x, compensation_remove = remove_var( + val, nobs, mean_x, ssqdm_x, compensation_remove + ) + + for j in range(end[i - 1], e): + val = values[j] + ( + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_var( + val, + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + + if nobs >= min_periods and nobs > ddof: + if nobs == 1 or num_consecutive_same_value >= nobs: + result = 0.0 + else: + result = ssqdm_x / (nobs - ddof) + else: + result = np.nan + + output[i] = result + + if not is_monotonic_increasing_bounds: + nobs = 0 + mean_x = 0.0 + ssqdm_x = 0.0 + compensation_remove = 0.0 + + # na_position is empty list since float64 can already hold nans + # Do list comprehension, since numba cannot figure out that na_pos is + # empty list of ints on its own + na_pos = [0 for i in range(0)] + return output, na_pos + + +@numba.jit(nopython=True, nogil=True, parallel=False) +def grouped_var( + values: np.ndarray, + result_dtype: np.dtype, + labels: npt.NDArray[np.intp], + ngroups: int, + min_periods: int, + ddof: int = 1, +) -> tuple[np.ndarray, list[int]]: + N = len(labels) + + nobs_arr = np.zeros(ngroups, dtype=np.int64) + comp_arr = np.zeros(ngroups, dtype=values.dtype) + consecutive_counts = np.zeros(ngroups, dtype=np.int64) + prev_vals = np.zeros(ngroups, 
dtype=values.dtype) + output = np.zeros(ngroups, dtype=result_dtype) + means = np.zeros(ngroups, dtype=result_dtype) + + for i in range(N): + lab = labels[i] + val = values[i] + + if lab < 0: + continue + + mean_x = means[lab] + ssqdm_x = output[lab] + nobs = nobs_arr[lab] + compensation_add = comp_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + prev_value = prev_vals[lab] + + ( + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) = add_var( + val, + nobs, + mean_x, + ssqdm_x, + compensation_add, + num_consecutive_same_value, + prev_value, + ) + + output[lab] = ssqdm_x + means[lab] = mean_x + consecutive_counts[lab] = num_consecutive_same_value + prev_vals[lab] = prev_value + comp_arr[lab] = compensation_add + nobs_arr[lab] = nobs + + # Post-processing, replace vars that don't satisfy min_periods + for lab in range(ngroups): + nobs = nobs_arr[lab] + num_consecutive_same_value = consecutive_counts[lab] + ssqdm_x = output[lab] + if nobs >= min_periods and nobs > ddof: + if nobs == 1 or num_consecutive_same_value >= nobs: + result = 0.0 + else: + result = ssqdm_x / (nobs - ddof) + else: + result = np.nan + output[lab] = result + + # Second pass to get the std.dev + # na_position is empty list since float64 can already hold nans + # Do list comprehension, since numba cannot figure out that na_pos is + # empty list of ints on its own + na_pos = [0 for i in range(0)] + return output, na_pos diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1972182a9fdf2aa410aa69a8b72a0039dda611a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77fc5ea1448b15dc8c0dc55694344054b1dc24f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91b4ff89bb7007198c4daa96bda4ddaaec4f3315 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/astype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98316845134eda96c2bf9385d66ab0ee497db318 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e0ba6d21a91bc8adb4607e24a88a07f7dfd0253 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/cast.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ebd971037e9c5cddd7ed5ac75abd654b714e3dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de5393e42aeb4d033074745ca1a66145f3fbe015 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/concat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..914b08feee6e556016632efb3a85a50487a9abed Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/dtypes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..547ac9b5396a3d5cbe6a99804d1cdee9a164c66e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/generic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..256eade341097c39543eee34db70323c2f3cdb90 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/inference.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6921a298282d9b117a80e218a94832f7414dd85e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/dtypes/__pycache__/missing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/api.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/api.py new file mode 100644 index 0000000000000000000000000000000000000000..254abe330b8e7229d0c2a27519e82efbe902c537 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/api.py @@ -0,0 +1,85 @@ +from pandas.core.dtypes.common import ( + is_any_real_numeric_dtype, + is_array_like, + is_bool, + is_bool_dtype, + is_categorical_dtype, + is_complex, + is_complex_dtype, + is_datetime64_any_dtype, + is_datetime64_dtype, + is_datetime64_ns_dtype, + is_datetime64tz_dtype, + is_dict_like, + is_dtype_equal, + 
is_extension_array_dtype, + is_file_like, + is_float, + is_float_dtype, + is_hashable, + is_int64_dtype, + is_integer, + is_integer_dtype, + is_interval, + is_interval_dtype, + is_iterator, + is_list_like, + is_named_tuple, + is_number, + is_numeric_dtype, + is_object_dtype, + is_period_dtype, + is_re, + is_re_compilable, + is_scalar, + is_signed_integer_dtype, + is_sparse, + is_string_dtype, + is_timedelta64_dtype, + is_timedelta64_ns_dtype, + is_unsigned_integer_dtype, + pandas_dtype, +) + +__all__ = [ + "is_any_real_numeric_dtype", + "is_array_like", + "is_bool", + "is_bool_dtype", + "is_categorical_dtype", + "is_complex", + "is_complex_dtype", + "is_datetime64_any_dtype", + "is_datetime64_dtype", + "is_datetime64_ns_dtype", + "is_datetime64tz_dtype", + "is_dict_like", + "is_dtype_equal", + "is_extension_array_dtype", + "is_file_like", + "is_float", + "is_float_dtype", + "is_hashable", + "is_int64_dtype", + "is_integer", + "is_integer_dtype", + "is_interval", + "is_interval_dtype", + "is_iterator", + "is_list_like", + "is_named_tuple", + "is_number", + "is_numeric_dtype", + "is_object_dtype", + "is_period_dtype", + "is_re", + "is_re_compilable", + "is_scalar", + "is_signed_integer_dtype", + "is_sparse", + "is_string_dtype", + "is_timedelta64_dtype", + "is_timedelta64_ns_dtype", + "is_unsigned_integer_dtype", + "pandas_dtype", +] diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/astype.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/astype.py new file mode 100644 index 0000000000000000000000000000000000000000..f5579082c679bf131c056f3f2029b2485e88bd0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/astype.py @@ -0,0 +1,301 @@ +""" +Functions for implementing 'astype' methods according to pandas conventions, +particularly ones that differ from numpy. +""" +from __future__ import annotations + +import inspect +from typing import ( + TYPE_CHECKING, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 +from pandas.errors import IntCastingNaNError + +from pandas.core.dtypes.common import ( + is_object_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + NumpyEADtype, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + DtypeObj, + IgnoreRaise, + ) + + from pandas.core.arrays import ExtensionArray + +_dtype_obj = np.dtype(object) + + +@overload +def _astype_nansafe( + arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ... +) -> np.ndarray: + ... + + +@overload +def _astype_nansafe( + arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ... +) -> ExtensionArray: + ... + + +def _astype_nansafe( + arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False +) -> ArrayLike: + """ + Cast the elements of an array to a given dtype a nan-safe manner. + + Parameters + ---------- + arr : ndarray + dtype : np.dtype or ExtensionDtype + copy : bool, default True + If False, a view will be attempted but may fail, if + e.g. the item sizes don't align. + skipna: bool, default False + Whether or not we should skip NaN when casting as a string-type. + + Raises + ------ + ValueError + The dtype was a datetime64/timedelta64 dtype, but it had no unit. 
+ """ + + # dispatch on extension dtype if needed + if isinstance(dtype, ExtensionDtype): + return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy) + + elif not isinstance(dtype, np.dtype): # pragma: no cover + raise ValueError("dtype must be np.dtype or ExtensionDtype") + + if arr.dtype.kind in "mM": + from pandas.core.construction import ensure_wrapped_if_datetimelike + + arr = ensure_wrapped_if_datetimelike(arr) + res = arr.astype(dtype, copy=copy) + return np.asarray(res) + + if issubclass(dtype.type, str): + shape = arr.shape + if arr.ndim > 1: + arr = arr.ravel() + return lib.ensure_string_array( + arr, skipna=skipna, convert_na_value=False + ).reshape(shape) + + elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in "iu": + return _astype_float_to_int_nansafe(arr, dtype, copy) + + elif arr.dtype == object: + # if we have a datetime/timedelta array of objects + # then coerce to datetime64[ns] and use DatetimeArray.astype + + if lib.is_np_dtype(dtype, "M"): + from pandas.core.arrays import DatetimeArray + + dta = DatetimeArray._from_sequence(arr, dtype=dtype) + return dta._ndarray + + elif lib.is_np_dtype(dtype, "m"): + from pandas.core.construction import ensure_wrapped_if_datetimelike + + # bc we know arr.dtype == object, this is equivalent to + # `np.asarray(to_timedelta(arr))`, but using a lower-level API that + # does not require a circular import. + tdvals = array_to_timedelta64(arr).view("m8[ns]") + + tda = ensure_wrapped_if_datetimelike(tdvals) + return tda.astype(dtype, copy=False)._ndarray + + if dtype.name in ("datetime64", "timedelta64"): + msg = ( + f"The '{dtype.name}' dtype has no unit. Please pass in " + f"'{dtype.name}[ns]' instead." + ) + raise ValueError(msg) + + if copy or arr.dtype == object or dtype == object: + # Explicit copy, or required since NumPy can't view from / to object. + return arr.astype(dtype, copy=True) + + return arr.astype(dtype, copy=copy) + + +def _astype_float_to_int_nansafe( + values: np.ndarray, dtype: np.dtype, copy: bool +) -> np.ndarray: + """ + astype with a check preventing converting NaN to an meaningless integer value. + """ + if not np.isfinite(values).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + if dtype.kind == "u": + # GH#45151 + if not (values >= 0).all(): + raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}") + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + return values.astype(dtype, copy=copy) + + +def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype. + + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : dtype object + copy : bool, default False + copy if indicated + + Returns + ------- + ndarray or ExtensionArray + """ + if values.dtype == dtype: + if copy: + return values.copy() + return values + + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + values = values.astype(dtype, copy=copy) + + else: + values = _astype_nansafe(values, dtype, copy=copy) + + # in pandas we don't store numpy str dtypes, so convert to object + if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + return values + + +def astype_array_safe( + values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise" +) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype. 
+ + This basically is the implementation for DataFrame/Series.astype and + includes all custom logic for pandas (NaN-safety, converting str to object, + not allowing ) + + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : str, dtype convertible + copy : bool, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + Returns + ------- + ndarray or ExtensionArray + """ + errors_legal_values = ("raise", "ignore") + + if errors not in errors_legal_values: + invalid_arg = ( + "Expected value of kwarg 'errors' to be one of " + f"{list(errors_legal_values)}. Supplied value is '{errors}'" + ) + raise ValueError(invalid_arg) + + if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): + msg = ( + f"Expected an instance of {dtype.__name__}, " + "but got the class instead. Try instantiating 'dtype'." + ) + raise TypeError(msg) + + dtype = pandas_dtype(dtype) + if isinstance(dtype, NumpyEADtype): + # Ensure we don't end up with a NumpyExtensionArray + dtype = dtype.numpy_dtype + + try: + new_values = astype_array(values, dtype, copy=copy) + except (ValueError, TypeError): + # e.g. _astype_nansafe can fail on object-dtype of strings + # trying to convert to float + if errors == "ignore": + new_values = values + else: + raise + + return new_values + + +def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool: + """Checks if astype avoided copying the data. + + Parameters + ---------- + dtype : Original dtype + new_dtype : target dtype + + Returns + ------- + True if new data is a view or not guaranteed to be a copy, False otherwise + """ + if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype): + new_dtype, dtype = dtype, new_dtype + + if dtype == new_dtype: + return True + + elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype): + # Only equal numpy dtypes avoid a copy + return False + + elif is_string_dtype(dtype) and is_string_dtype(new_dtype): + # Potentially! a view when converting from object to string + return True + + elif is_object_dtype(dtype) and new_dtype.kind == "O": + # When the underlying array has dtype object, we don't have to make a copy + return True + + elif dtype.kind in "mM" and new_dtype.kind in "mM": + dtype = getattr(dtype, "numpy_dtype", dtype) + new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype) + return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None) + + numpy_dtype = getattr(dtype, "numpy_dtype", None) + new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None) + + if numpy_dtype is None and isinstance(dtype, np.dtype): + numpy_dtype = dtype + + if new_numpy_dtype is None and isinstance(new_dtype, np.dtype): + new_numpy_dtype = new_dtype + + if numpy_dtype is not None and new_numpy_dtype is not None: + # if both have NumPy dtype or one of them is a numpy dtype + # they are only a view when the numpy dtypes are equal, e.g. 
+ # int64 -> Int64 or int64[pyarrow] + # int64 -> Int32 copies + return numpy_dtype == new_numpy_dtype + + # Assume this is a view since we don't know for sure if a copy was made + return True diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/base.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/base.py new file mode 100644 index 0000000000000000000000000000000000000000..6b00a5284ec5b18809e233e9ef89e31771ac651e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/base.py @@ -0,0 +1,583 @@ +""" +Extend pandas with custom array types. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + TypeVar, + cast, + overload, +) + +import numpy as np + +from pandas._libs import missing as libmissing +from pandas._libs.hashtable import object_hash +from pandas._libs.properties import cache_readonly +from pandas.errors import AbstractMethodError + +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from pandas._typing import ( + DtypeObj, + Self, + Shape, + npt, + type_t, + ) + + from pandas import Index + from pandas.core.arrays import ExtensionArray + + # To parameterize on same ExtensionDtype + ExtensionDtypeT = TypeVar("ExtensionDtypeT", bound="ExtensionDtype") + + +class ExtensionDtype: + """ + A custom data type, to be paired with an ExtensionArray. + + See Also + -------- + extensions.register_extension_dtype: Register an ExtensionType + with pandas as class decorator. + extensions.ExtensionArray: Abstract base class for custom 1-D array types. + + Notes + ----- + The interface includes the following abstract methods that must + be implemented by subclasses: + + * type + * name + * construct_array_type + + The following attributes and methods influence the behavior of the dtype in + pandas operations + + * _is_numeric + * _is_boolean + * _get_common_dtype + + The `na_value` class attribute can be used to set the default NA value + for this type. :attr:`numpy.nan` is used by default. + + ExtensionDtypes are required to be hashable. The base class provides + a default implementation, which relies on the ``_metadata`` class + attribute. ``_metadata`` should be a tuple containing the strings + that define your data type. For example, with ``PeriodDtype`` that's + the ``freq`` attribute. + + **If you have a parametrized dtype you should set the ``_metadata`` + class property**. + + Ideally, the attributes in ``_metadata`` will match the + parameters to your ``ExtensionDtype.__init__`` (if any). If any of + the attributes in ``_metadata`` don't implement the standard + ``__eq__`` or ``__hash__``, the default implementations here will not + work. + + Examples + -------- + + For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method + can be implemented: this method receives a pyarrow Array or ChunkedArray + as only argument and is expected to return the appropriate pandas + ExtensionArray for this dtype and the passed values: + + >>> import pyarrow + >>> from pandas.api.extensions import ExtensionArray + >>> class ExtensionDtype: + ... def __from_arrow__( + ... self, + ... array: pyarrow.Array | pyarrow.ChunkedArray + ... ) -> ExtensionArray: + ... ... + + This class does not inherit from 'abc.ABCMeta' for performance reasons. + Methods and properties required by the interface raise + ``pandas.errors.AbstractMethodError`` and no ``register`` method is + provided for registering virtual subclasses. + """ + + _metadata: tuple[str, ...] 
= () + + def __str__(self) -> str: + return self.name + + def __eq__(self, other: object) -> bool: + """ + Check whether 'other' is equal to self. + + By default, 'other' is considered equal if either + + * it's a string matching 'self.name'. + * it's an instance of this type and all of the attributes + in ``self._metadata`` are equal between `self` and `other`. + + Parameters + ---------- + other : Any + + Returns + ------- + bool + """ + if isinstance(other, str): + try: + other = self.construct_from_string(other) + except TypeError: + return False + if isinstance(other, type(self)): + return all( + getattr(self, attr) == getattr(other, attr) for attr in self._metadata + ) + return False + + def __hash__(self) -> int: + # for python>=3.10, different nan objects have different hashes + # we need to avoid that and thus use hash function with old behavior + return object_hash(tuple(getattr(self, attr) for attr in self._metadata)) + + def __ne__(self, other: object) -> bool: + return not self.__eq__(other) + + @property + def na_value(self) -> object: + """ + Default NA value to use for this type. + + This is used in e.g. ExtensionArray.take. This should be the + user-facing "boxed" version of the NA value, not the physical NA value + for storage. e.g. for JSONArray, this is an empty dictionary. + """ + return np.nan + + @property + def type(self) -> type_t[Any]: + """ + The scalar type for the array, e.g. ``int`` + + It's expected ``ExtensionArray[item]`` returns an instance + of ``ExtensionDtype.type`` for scalar ``item``, assuming + that value is valid (not NA). NA values do not need to be + instances of `type`. + """ + raise AbstractMethodError(self) + + @property + def kind(self) -> str: + """ + A character code (one of 'biufcmMOSUV'), default 'O' + + This should match the NumPy dtype used when the array is + converted to an ndarray, which is probably 'O' for object if + the extension type cannot be represented as a built-in NumPy + type. + + See Also + -------- + numpy.dtype.kind + """ + return "O" + + @property + def name(self) -> str: + """ + A string identifying the data type. + + Will be used for display in, e.g. ``Series.dtype`` + """ + raise AbstractMethodError(self) + + @property + def names(self) -> list[str] | None: + """ + Ordered list of field names, or None if there are no fields. + + This is for compatibility with NumPy arrays, and may be removed in the + future. + """ + return None + + @classmethod + def construct_array_type(cls) -> type_t[ExtensionArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + raise AbstractMethodError(cls) + + def empty(self, shape: Shape) -> ExtensionArray: + """ + Construct an ExtensionArray of this dtype with the given shape. + + Analogous to numpy.empty. + + Parameters + ---------- + shape : int or tuple[int] + + Returns + ------- + ExtensionArray + """ + cls = self.construct_array_type() + return cls._empty(shape, dtype=self) + + @classmethod + def construct_from_string(cls, string: str) -> Self: + r""" + Construct this type from a string. + + This is useful mainly for data types that accept parameters. + For example, a period dtype accepts a frequency parameter that + can be set as ``period[h]`` (where H means hourly frequency). + + By default, in the abstract class, just the name of the type is + expected. But subclasses can overwrite this method to accept + parameters. + + Parameters + ---------- + string : str + The name of the type, for example ``category``. 
+ + Returns + ------- + ExtensionDtype + Instance of the dtype. + + Raises + ------ + TypeError + If a class cannot be constructed from this 'string'. + + Examples + -------- + For extension dtypes with arguments the following may be an + adequate implementation. + + >>> import re + >>> @classmethod + ... def construct_from_string(cls, string): + ... pattern = re.compile(r"^my_type\[(?P.+)\]$") + ... match = pattern.match(string) + ... if match: + ... return cls(**match.groupdict()) + ... else: + ... raise TypeError( + ... f"Cannot construct a '{cls.__name__}' from '{string}'" + ... ) + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + # error: Non-overlapping equality check (left operand type: "str", right + # operand type: "Callable[[ExtensionDtype], str]") [comparison-overlap] + assert isinstance(cls.name, str), (cls, type(cls.name)) + if string != cls.name: + raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") + return cls() + + @classmethod + def is_dtype(cls, dtype: object) -> bool: + """ + Check if we match 'dtype'. + + Parameters + ---------- + dtype : object + The object to check. + + Returns + ------- + bool + + Notes + ----- + The default implementation is True if + + 1. ``cls.construct_from_string(dtype)`` is an instance + of ``cls``. + 2. ``dtype`` is an object and is an instance of ``cls`` + 3. ``dtype`` has a ``dtype`` attribute, and any of the above + conditions is true for ``dtype.dtype``. + """ + dtype = getattr(dtype, "dtype", dtype) + + if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)): + # https://github.com/pandas-dev/pandas/issues/22960 + # avoid passing data to `construct_from_string`. This could + # cause a FutureWarning from numpy about failing elementwise + # comparison from, e.g., comparing DataFrame == 'category'. + return False + elif dtype is None: + return False + elif isinstance(dtype, cls): + return True + if isinstance(dtype, str): + try: + return cls.construct_from_string(dtype) is not None + except TypeError: + return False + return False + + @property + def _is_numeric(self) -> bool: + """ + Whether columns with this dtype should be considered numeric. + + By default ExtensionDtypes are assumed to be non-numeric. + They'll be excluded from operations that exclude non-numeric + columns, like (groupby) reductions, plotting, etc. + """ + return False + + @property + def _is_boolean(self) -> bool: + """ + Whether this dtype should be considered boolean. + + By default, ExtensionDtypes are assumed to be non-numeric. + Setting this to True will affect the behavior of several places, + e.g. + + * is_bool + * boolean indexing + + Returns + ------- + bool + """ + return False + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + """ + Return the common dtype, if one exists. + + Used in `find_common_type` implementation. This is for example used + to determine the resulting dtype in a concat operation. + + If no common dtype exists, return None (which gives the other dtypes + the chance to determine a common dtype). If all dtypes in the list + return None, then the common dtype will be "object" dtype (this means + it is never needed to return "object" dtype from this method itself). + + Parameters + ---------- + dtypes : list of dtypes + The dtypes for which to determine a common dtype. This is a list + of np.dtype or ExtensionDtype instances. 
+ + Returns + ------- + Common dtype (np.dtype or ExtensionDtype) or None + """ + if len(set(dtypes)) == 1: + # only itself + return self + else: + return None + + @property + def _can_hold_na(self) -> bool: + """ + Can arrays of this dtype hold NA values? + """ + return True + + @property + def _is_immutable(self) -> bool: + """ + Can arrays with this dtype be modified with __setitem__? If not, return + True. + + Immutable arrays are expected to raise TypeError on __setitem__ calls. + """ + return False + + @cache_readonly + def index_class(self) -> type_t[Index]: + """ + The Index subclass to return from Index.__new__ when this dtype is + encountered. + """ + from pandas import Index + + return Index + + @property + def _supports_2d(self) -> bool: + """ + Do ExtensionArrays with this dtype support 2D arrays? + + Historically ExtensionArrays were limited to 1D. By returning True here, + authors can indicate that their arrays support 2D instances. This can + improve performance in some cases, particularly operations with `axis=1`. + + Arrays that support 2D values should: + + - implement Array.reshape + - subclass the Dim2CompatTests in tests.extension.base + - _concat_same_type should support `axis` keyword + - _reduce and reductions should support `axis` keyword + """ + return False + + @property + def _can_fast_transpose(self) -> bool: + """ + Is transposing an array with this dtype zero-copy? + + Only relevant for cases where _supports_2d is True. + """ + return False + + +class StorageExtensionDtype(ExtensionDtype): + """ExtensionDtype that may be backed by more than one implementation.""" + + name: str + _metadata = ("storage",) + + def __init__(self, storage: str | None = None) -> None: + self.storage = storage + + def __repr__(self) -> str: + return f"{self.name}[{self.storage}]" + + def __str__(self) -> str: + return self.name + + def __eq__(self, other: object) -> bool: + if isinstance(other, str) and other == self.name: + return True + return super().__eq__(other) + + def __hash__(self) -> int: + # custom __eq__ so have to override __hash__ + return super().__hash__() + + @property + def na_value(self) -> libmissing.NAType: + return libmissing.NA + + +def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: + """ + Register an ExtensionType with pandas as class decorator. + + This enables operations like ``.astype(name)`` for the name + of the ExtensionDtype. + + Returns + ------- + callable + A class decorator. + + Examples + -------- + >>> from pandas.api.extensions import register_extension_dtype, ExtensionDtype + >>> @register_extension_dtype + ... class MyExtensionDtype(ExtensionDtype): + ... name = "myextension" + """ + _registry.register(cls) + return cls + + +class Registry: + """ + Registry for dtype inference. + + The registry allows one to map a string repr of a extension + dtype to an extension dtype. The string alias can be used in several + places, including + + * Series and Index constructors + * :meth:`pandas.array` + * :meth:`pandas.Series.astype` + + Multiple extension types can be registered. + These are tried in order. 
+ """ + + def __init__(self) -> None: + self.dtypes: list[type_t[ExtensionDtype]] = [] + + def register(self, dtype: type_t[ExtensionDtype]) -> None: + """ + Parameters + ---------- + dtype : ExtensionDtype class + """ + if not issubclass(dtype, ExtensionDtype): + raise ValueError("can only register pandas extension dtypes") + + self.dtypes.append(dtype) + + @overload + def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: + ... + + @overload + def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT: + ... + + @overload + def find(self, dtype: str) -> ExtensionDtype | None: + ... + + @overload + def find( + self, dtype: npt.DTypeLike + ) -> type_t[ExtensionDtype] | ExtensionDtype | None: + ... + + def find( + self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike + ) -> type_t[ExtensionDtype] | ExtensionDtype | None: + """ + Parameters + ---------- + dtype : ExtensionDtype class or instance or str or numpy dtype or python type + + Returns + ------- + return the first matching dtype, otherwise return None + """ + if not isinstance(dtype, str): + dtype_type: type_t + if not isinstance(dtype, type): + dtype_type = type(dtype) + else: + dtype_type = dtype + if issubclass(dtype_type, ExtensionDtype): + # cast needed here as mypy doesn't know we have figured + # out it is an ExtensionDtype or type_t[ExtensionDtype] + return cast("ExtensionDtype | type_t[ExtensionDtype]", dtype) + + return None + + for dtype_type in self.dtypes: + try: + return dtype_type.construct_from_string(dtype) + except TypeError: + pass + + return None + + +_registry = Registry() diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/common.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/common.py new file mode 100644 index 0000000000000000000000000000000000000000..df0251d141984c9223d44a178db99b2651af7e78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/common.py @@ -0,0 +1,1748 @@ +""" +Common type operations. 
+""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) +import warnings + +import numpy as np + +from pandas._libs import ( + Interval, + Period, + algos, + lib, +) +from pandas._libs.tslibs import conversion +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import _registry as registry +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PeriodDtype, + SparseDtype, +) +from pandas.core.dtypes.generic import ABCIndex +from pandas.core.dtypes.inference import ( + is_array_like, + is_bool, + is_complex, + is_dataclass, + is_decimal, + is_dict_like, + is_file_like, + is_float, + is_hashable, + is_integer, + is_interval, + is_iterator, + is_list_like, + is_named_tuple, + is_nested_list_like, + is_number, + is_re, + is_re_compilable, + is_scalar, + is_sequence, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + DtypeObj, + ) + +DT64NS_DTYPE = conversion.DT64NS_DTYPE +TD64NS_DTYPE = conversion.TD64NS_DTYPE +INT64_DTYPE = np.dtype(np.int64) + +# oh the troubles to reduce import time +_is_scipy_sparse = None + +ensure_float64 = algos.ensure_float64 +ensure_int64 = algos.ensure_int64 +ensure_int32 = algos.ensure_int32 +ensure_int16 = algos.ensure_int16 +ensure_int8 = algos.ensure_int8 +ensure_platform_int = algos.ensure_platform_int +ensure_object = algos.ensure_object +ensure_uint64 = algos.ensure_uint64 + + +def ensure_str(value: bytes | Any) -> str: + """ + Ensure that bytes and non-strings get converted into ``str`` objects. + """ + if isinstance(value, bytes): + value = value.decode("utf-8") + elif not isinstance(value, str): + value = str(value) + return value + + +def ensure_python_int(value: int | np.integer) -> int: + """ + Ensure that a value is a python int. + + Parameters + ---------- + value: int or numpy.integer + + Returns + ------- + int + + Raises + ------ + TypeError: if the value isn't an int or can't be converted to one. + """ + if not (is_integer(value) or is_float(value)): + if not is_scalar(value): + raise TypeError( + f"Value needs to be a scalar value, was type {type(value).__name__}" + ) + raise TypeError(f"Wrong type {type(value)} for value {value}") + try: + new_value = int(value) + assert new_value == value + except (TypeError, ValueError, AssertionError) as err: + raise TypeError(f"Wrong type {type(value)} for value {value}") from err + return new_value + + +def classes(*klasses) -> Callable: + """Evaluate if the tipo is a subclass of the klasses.""" + return lambda tipo: issubclass(tipo, klasses) + + +def _classes_and_not_datetimelike(*klasses) -> Callable: + """ + Evaluate if the tipo is a subclass of the klasses + and not a datetimelike. + """ + return lambda tipo: ( + issubclass(tipo, klasses) + and not issubclass(tipo, (np.datetime64, np.timedelta64)) + ) + + +def is_object_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the object dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the object dtype. 
+ + Examples + -------- + >>> from pandas.api.types import is_object_dtype + >>> is_object_dtype(object) + True + >>> is_object_dtype(int) + False + >>> is_object_dtype(np.array([], dtype=object)) + True + >>> is_object_dtype(np.array([], dtype=int)) + False + >>> is_object_dtype([1, 2, 3]) + False + """ + return _is_dtype_type(arr_or_dtype, classes(np.object_)) + + +def is_sparse(arr) -> bool: + """ + Check whether an array-like is a 1-D pandas sparse array. + + .. deprecated:: 2.1.0 + Use isinstance(dtype, pd.SparseDtype) instead. + + Check that the one-dimensional array-like is a pandas sparse array. + Returns True if it is a pandas sparse array, not another type of + sparse array. + + Parameters + ---------- + arr : array-like + Array-like to check. + + Returns + ------- + bool + Whether or not the array-like is a pandas sparse array. + + Examples + -------- + Returns `True` if the parameter is a 1-D pandas sparse array. + + >>> from pandas.api.types import is_sparse + >>> is_sparse(pd.arrays.SparseArray([0, 0, 1, 0])) + True + >>> is_sparse(pd.Series(pd.arrays.SparseArray([0, 0, 1, 0]))) + True + + Returns `False` if the parameter is not sparse. + + >>> is_sparse(np.array([0, 0, 1, 0])) + False + >>> is_sparse(pd.Series([0, 1, 0, 0])) + False + + Returns `False` if the parameter is not a pandas sparse array. + + >>> from scipy.sparse import bsr_matrix + >>> is_sparse(bsr_matrix([0, 1, 0, 0])) + False + + Returns `False` if the parameter has more than one dimension. + """ + warnings.warn( + "is_sparse is deprecated and will be removed in a future " + "version. Check `isinstance(dtype, pd.SparseDtype)` instead.", + DeprecationWarning, + stacklevel=2, + ) + + dtype = getattr(arr, "dtype", arr) + return isinstance(dtype, SparseDtype) + + +def is_scipy_sparse(arr) -> bool: + """ + Check whether an array-like is a scipy.sparse.spmatrix instance. + + Parameters + ---------- + arr : array-like + The array-like to check. + + Returns + ------- + boolean + Whether or not the array-like is a scipy.sparse.spmatrix instance. + + Notes + ----- + If scipy is not installed, this function will always return False. + + Examples + -------- + >>> from scipy.sparse import bsr_matrix + >>> is_scipy_sparse(bsr_matrix([1, 2, 3])) + True + >>> is_scipy_sparse(pd.arrays.SparseArray([1, 2, 3])) + False + """ + global _is_scipy_sparse + + if _is_scipy_sparse is None: # pylint: disable=used-before-assignment + try: + from scipy.sparse import issparse as _is_scipy_sparse + except ImportError: + _is_scipy_sparse = lambda _: False + + assert _is_scipy_sparse is not None + return _is_scipy_sparse(arr) + + +def is_datetime64_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the datetime64 dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the datetime64 dtype. 
+ + Examples + -------- + >>> from pandas.api.types import is_datetime64_dtype + >>> is_datetime64_dtype(object) + False + >>> is_datetime64_dtype(np.datetime64) + True + >>> is_datetime64_dtype(np.array([], dtype=int)) + False + >>> is_datetime64_dtype(np.array([], dtype=np.datetime64)) + True + >>> is_datetime64_dtype([1, 2, 3]) + False + """ + if isinstance(arr_or_dtype, np.dtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.kind == "M" + return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) + + +def is_datetime64tz_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of a DatetimeTZDtype dtype. + + .. deprecated:: 2.1.0 + Use isinstance(dtype, pd.DatetimeTZDtype) instead. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of a DatetimeTZDtype dtype. + + Examples + -------- + >>> from pandas.api.types import is_datetime64tz_dtype + >>> is_datetime64tz_dtype(object) + False + >>> is_datetime64tz_dtype([1, 2, 3]) + False + >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3])) # tz-naive + False + >>> is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) + True + + >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype + >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") + >>> s = pd.Series([], dtype=dtype) + >>> is_datetime64tz_dtype(dtype) + True + >>> is_datetime64tz_dtype(s) + True + """ + # GH#52607 + warnings.warn( + "is_datetime64tz_dtype is deprecated and will be removed in a future " + "version. Check `isinstance(dtype, pd.DatetimeTZDtype)` instead.", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(arr_or_dtype, DatetimeTZDtype): + # GH#33400 fastpath for dtype object + # GH 34986 + return True + + if arr_or_dtype is None: + return False + return DatetimeTZDtype.is_dtype(arr_or_dtype) + + +def is_timedelta64_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the timedelta64 dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the timedelta64 dtype. + + Examples + -------- + >>> from pandas.core.dtypes.common import is_timedelta64_dtype + >>> is_timedelta64_dtype(object) + False + >>> is_timedelta64_dtype(np.timedelta64) + True + >>> is_timedelta64_dtype([1, 2, 3]) + False + >>> is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]")) + True + >>> is_timedelta64_dtype('0 days') + False + """ + if isinstance(arr_or_dtype, np.dtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.kind == "m" + + return _is_dtype_type(arr_or_dtype, classes(np.timedelta64)) + + +def is_period_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the Period dtype. + + .. deprecated:: 2.2.0 + Use isinstance(dtype, pd.Period) instead. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the Period dtype. 
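# Sketch of the replacement pattern the deprecation warnings above point to:
# instead of is_datetime64tz_dtype(...) / is_period_dtype(...), test the dtype
# object directly with isinstance (pd.DatetimeTZDtype and pd.PeriodDtype are
# exported at the top level of pandas).
import pandas as pd

tz_ser = pd.Series(pd.date_range("2020-01-01", periods=3, tz="US/Eastern"))
per_idx = pd.period_range("2020-01", periods=3, freq="M")
print(isinstance(tz_ser.dtype, pd.DatetimeTZDtype))  # True
print(isinstance(per_idx.dtype, pd.PeriodDtype))     # True
print(isinstance(tz_ser.dtype, pd.PeriodDtype))      # False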
+ + Examples + -------- + >>> from pandas.core.dtypes.common import is_period_dtype + >>> is_period_dtype(object) + False + >>> is_period_dtype(pd.PeriodDtype(freq="D")) + True + >>> is_period_dtype([1, 2, 3]) + False + >>> is_period_dtype(pd.Period("2017-01-01")) + False + >>> is_period_dtype(pd.PeriodIndex([], freq="Y")) + True + """ + warnings.warn( + "is_period_dtype is deprecated and will be removed in a future version. " + "Use `isinstance(dtype, pd.PeriodDtype)` instead", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(arr_or_dtype, ExtensionDtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.type is Period + + if arr_or_dtype is None: + return False + return PeriodDtype.is_dtype(arr_or_dtype) + + +def is_interval_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the Interval dtype. + + .. deprecated:: 2.2.0 + Use isinstance(dtype, pd.IntervalDtype) instead. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the Interval dtype. + + Examples + -------- + >>> from pandas.core.dtypes.common import is_interval_dtype + >>> is_interval_dtype(object) + False + >>> is_interval_dtype(pd.IntervalDtype()) + True + >>> is_interval_dtype([1, 2, 3]) + False + >>> + >>> interval = pd.Interval(1, 2, closed="right") + >>> is_interval_dtype(interval) + False + >>> is_interval_dtype(pd.IntervalIndex([interval])) + True + """ + # GH#52607 + warnings.warn( + "is_interval_dtype is deprecated and will be removed in a future version. " + "Use `isinstance(dtype, pd.IntervalDtype)` instead", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(arr_or_dtype, ExtensionDtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.type is Interval + + if arr_or_dtype is None: + return False + return IntervalDtype.is_dtype(arr_or_dtype) + + +def is_categorical_dtype(arr_or_dtype) -> bool: + """ + Check whether an array-like or dtype is of the Categorical dtype. + + .. deprecated:: 2.2.0 + Use isinstance(dtype, pd.CategoricalDtype) instead. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype to check. + + Returns + ------- + boolean + Whether or not the array-like or dtype is of the Categorical dtype. + + Examples + -------- + >>> from pandas.api.types import is_categorical_dtype + >>> from pandas import CategoricalDtype + >>> is_categorical_dtype(object) + False + >>> is_categorical_dtype(CategoricalDtype()) + True + >>> is_categorical_dtype([1, 2, 3]) + False + >>> is_categorical_dtype(pd.Categorical([1, 2, 3])) + True + >>> is_categorical_dtype(pd.CategoricalIndex([1, 2, 3])) + True + """ + # GH#52527 + warnings.warn( + "is_categorical_dtype is deprecated and will be removed in a future " + "version. Use isinstance(dtype, pd.CategoricalDtype) instead", + DeprecationWarning, + stacklevel=2, + ) + if isinstance(arr_or_dtype, ExtensionDtype): + # GH#33400 fastpath for dtype object + return arr_or_dtype.name == "category" + + if arr_or_dtype is None: + return False + return CategoricalDtype.is_dtype(arr_or_dtype) + + +def is_string_or_object_np_dtype(dtype: np.dtype) -> bool: + """ + Faster alternative to is_string_dtype, assumes we have a np.dtype object. + """ + return dtype == object or dtype.kind in "SU" + + +def is_string_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the string dtype. 
+ + If an array is passed with an object dtype, the elements must be + inferred as strings. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of the string dtype. + + Examples + -------- + >>> from pandas.api.types import is_string_dtype + >>> is_string_dtype(str) + True + >>> is_string_dtype(object) + True + >>> is_string_dtype(int) + False + >>> is_string_dtype(np.array(['a', 'b'])) + True + >>> is_string_dtype(pd.Series([1, 2])) + False + >>> is_string_dtype(pd.Series([1, 2], dtype=object)) + False + """ + if hasattr(arr_or_dtype, "dtype") and _get_dtype(arr_or_dtype).kind == "O": + return is_all_strings(arr_or_dtype) + + def condition(dtype) -> bool: + if is_string_or_object_np_dtype(dtype): + return True + try: + return dtype == "string" + except TypeError: + return False + + return _is_dtype(arr_or_dtype, condition) + + +def is_dtype_equal(source, target) -> bool: + """ + Check if two dtypes are equal. + + Parameters + ---------- + source : The first dtype to compare + target : The second dtype to compare + + Returns + ------- + boolean + Whether or not the two dtypes are equal. + + Examples + -------- + >>> is_dtype_equal(int, float) + False + >>> is_dtype_equal("int", int) + True + >>> is_dtype_equal(object, "category") + False + >>> is_dtype_equal(CategoricalDtype(), "category") + True + >>> is_dtype_equal(DatetimeTZDtype(tz="UTC"), "datetime64") + False + """ + if isinstance(target, str): + if not isinstance(source, str): + # GH#38516 ensure we get the same behavior from + # is_dtype_equal(CDT, "category") and CDT == "category" + try: + src = _get_dtype(source) + if isinstance(src, ExtensionDtype): + return src == target + except (TypeError, AttributeError, ImportError): + return False + elif isinstance(source, str): + return is_dtype_equal(target, source) + + try: + source = _get_dtype(source) + target = _get_dtype(target) + return source == target + except (TypeError, AttributeError, ImportError): + # invalid comparison + # object == category will hit this + return False + + +def is_integer_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of an integer dtype. + + Unlike in `is_any_int_dtype`, timedelta64 instances will return False. + + The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered + as integer by this function. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of an integer dtype and + not an instance of timedelta64. 
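# Sketch of the ExtensionDtype branch in is_integer_dtype / is_signed_integer_dtype
# above: the nullable dtypes expose a NumPy-style .kind code ("i" / "u"), which is
# what the lambda inspects (assumes the standard pandas nullable integer dtypes).
import pandas as pd
from pandas.api.types import is_integer_dtype, is_signed_integer_dtype

print(pd.Int64Dtype().kind, pd.UInt8Dtype().kind)                # i u
print(is_integer_dtype(pd.array([1, None], dtype="Int64")))      # True
print(is_signed_integer_dtype(pd.array([1, 2], dtype="UInt8")))  # False (unsigned)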
+ + Examples + -------- + >>> from pandas.api.types import is_integer_dtype + >>> is_integer_dtype(str) + False + >>> is_integer_dtype(int) + True + >>> is_integer_dtype(float) + False + >>> is_integer_dtype(np.uint64) + True + >>> is_integer_dtype('int8') + True + >>> is_integer_dtype('Int8') + True + >>> is_integer_dtype(pd.Int8Dtype) + True + >>> is_integer_dtype(np.datetime64) + False + >>> is_integer_dtype(np.timedelta64) + False + >>> is_integer_dtype(np.array(['a', 'b'])) + False + >>> is_integer_dtype(pd.Series([1, 2])) + True + >>> is_integer_dtype(np.array([], dtype=np.timedelta64)) + False + >>> is_integer_dtype(pd.Index([1, 2.])) # float + False + """ + return _is_dtype_type( + arr_or_dtype, _classes_and_not_datetimelike(np.integer) + ) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "iu" + ) + + +def is_signed_integer_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a signed integer dtype. + + Unlike in `is_any_int_dtype`, timedelta64 instances will return False. + + The nullable Integer dtypes (e.g. pandas.Int64Dtype) are also considered + as integer by this function. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a signed integer dtype + and not an instance of timedelta64. + + Examples + -------- + >>> from pandas.core.dtypes.common import is_signed_integer_dtype + >>> is_signed_integer_dtype(str) + False + >>> is_signed_integer_dtype(int) + True + >>> is_signed_integer_dtype(float) + False + >>> is_signed_integer_dtype(np.uint64) # unsigned + False + >>> is_signed_integer_dtype('int8') + True + >>> is_signed_integer_dtype('Int8') + True + >>> is_signed_integer_dtype(pd.Int8Dtype) + True + >>> is_signed_integer_dtype(np.datetime64) + False + >>> is_signed_integer_dtype(np.timedelta64) + False + >>> is_signed_integer_dtype(np.array(['a', 'b'])) + False + >>> is_signed_integer_dtype(pd.Series([1, 2])) + True + >>> is_signed_integer_dtype(np.array([], dtype=np.timedelta64)) + False + >>> is_signed_integer_dtype(pd.Index([1, 2.])) # float + False + >>> is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned + False + """ + return _is_dtype_type( + arr_or_dtype, _classes_and_not_datetimelike(np.signedinteger) + ) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "i" + ) + + +def is_unsigned_integer_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of an unsigned integer dtype. + + The nullable Integer dtypes (e.g. pandas.UInt64Dtype) are also + considered as integer by this function. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of an unsigned integer dtype. 
+ + Examples + -------- + >>> from pandas.api.types import is_unsigned_integer_dtype + >>> is_unsigned_integer_dtype(str) + False + >>> is_unsigned_integer_dtype(int) # signed + False + >>> is_unsigned_integer_dtype(float) + False + >>> is_unsigned_integer_dtype(np.uint64) + True + >>> is_unsigned_integer_dtype('uint8') + True + >>> is_unsigned_integer_dtype('UInt8') + True + >>> is_unsigned_integer_dtype(pd.UInt8Dtype) + True + >>> is_unsigned_integer_dtype(np.array(['a', 'b'])) + False + >>> is_unsigned_integer_dtype(pd.Series([1, 2])) # signed + False + >>> is_unsigned_integer_dtype(pd.Index([1, 2.])) # float + False + >>> is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32)) + True + """ + return _is_dtype_type( + arr_or_dtype, _classes_and_not_datetimelike(np.unsignedinteger) + ) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == "u" + ) + + +def is_int64_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the int64 dtype. + + .. deprecated:: 2.1.0 + + is_int64_dtype is deprecated and will be removed in a future + version. Use dtype == np.int64 instead. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of the int64 dtype. + + Notes + ----- + Depending on system architecture, the return value of `is_int64_dtype( + int)` will be True if the OS uses 64-bit integers and False if the OS + uses 32-bit integers. + + Examples + -------- + >>> from pandas.api.types import is_int64_dtype + >>> is_int64_dtype(str) # doctest: +SKIP + False + >>> is_int64_dtype(np.int32) # doctest: +SKIP + False + >>> is_int64_dtype(np.int64) # doctest: +SKIP + True + >>> is_int64_dtype('int8') # doctest: +SKIP + False + >>> is_int64_dtype('Int8') # doctest: +SKIP + False + >>> is_int64_dtype(pd.Int64Dtype) # doctest: +SKIP + True + >>> is_int64_dtype(float) # doctest: +SKIP + False + >>> is_int64_dtype(np.uint64) # unsigned # doctest: +SKIP + False + >>> is_int64_dtype(np.array(['a', 'b'])) # doctest: +SKIP + False + >>> is_int64_dtype(np.array([1, 2], dtype=np.int64)) # doctest: +SKIP + True + >>> is_int64_dtype(pd.Index([1, 2.])) # float # doctest: +SKIP + False + >>> is_int64_dtype(np.array([1, 2], dtype=np.uint32)) # unsigned # doctest: +SKIP + False + """ + # GH#52564 + warnings.warn( + "is_int64_dtype is deprecated and will be removed in a future " + "version. Use dtype == np.int64 instead.", + DeprecationWarning, + stacklevel=2, + ) + return _is_dtype_type(arr_or_dtype, classes(np.int64)) + + +def is_datetime64_any_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the datetime64 dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + bool + Whether or not the array or dtype is of the datetime64 dtype. 
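# Sketch of the replacement suggested by the is_int64_dtype deprecation above:
# compare the dtype object against np.int64 (or np.dtype("int64")) directly.
import numpy as np
import pandas as pd

ser = pd.Series([1, 2, 3])            # default integer dtype on standard builds
print(ser.dtype == np.int64)          # True
print(np.dtype("int32") == np.int64)  # False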
+ + Examples + -------- + >>> from pandas.api.types import is_datetime64_any_dtype + >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype + >>> is_datetime64_any_dtype(str) + False + >>> is_datetime64_any_dtype(int) + False + >>> is_datetime64_any_dtype(np.datetime64) # can be tz-naive + True + >>> is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern")) + True + >>> is_datetime64_any_dtype(np.array(['a', 'b'])) + False + >>> is_datetime64_any_dtype(np.array([1, 2])) + False + >>> is_datetime64_any_dtype(np.array([], dtype="datetime64[ns]")) + True + >>> is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]")) + True + """ + if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)): + # GH#33400 fastpath for dtype object + return arr_or_dtype.kind == "M" + + if arr_or_dtype is None: + return False + + try: + tipo = _get_dtype(arr_or_dtype) + except TypeError: + return False + return lib.is_np_dtype(tipo, "M") or isinstance(tipo, DatetimeTZDtype) + + +def is_datetime64_ns_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the datetime64[ns] dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + bool + Whether or not the array or dtype is of the datetime64[ns] dtype. + + Examples + -------- + >>> from pandas.api.types import is_datetime64_ns_dtype + >>> from pandas.core.dtypes.dtypes import DatetimeTZDtype + >>> is_datetime64_ns_dtype(str) + False + >>> is_datetime64_ns_dtype(int) + False + >>> is_datetime64_ns_dtype(np.datetime64) # no unit + False + >>> is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern")) + True + >>> is_datetime64_ns_dtype(np.array(['a', 'b'])) + False + >>> is_datetime64_ns_dtype(np.array([1, 2])) + False + >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64")) # no unit + False + >>> is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]")) # wrong unit + False + >>> is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]")) + True + """ + if arr_or_dtype is None: + return False + try: + tipo = _get_dtype(arr_or_dtype) + except TypeError: + return False + return tipo == DT64NS_DTYPE or ( + isinstance(tipo, DatetimeTZDtype) and tipo.unit == "ns" + ) + + +def is_timedelta64_ns_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of the timedelta64[ns] dtype. + + This is a very specific dtype, so generic ones like `np.timedelta64` + will return False if passed into this function. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of the timedelta64[ns] dtype. + + Examples + -------- + >>> from pandas.core.dtypes.common import is_timedelta64_ns_dtype + >>> is_timedelta64_ns_dtype(np.dtype('m8[ns]')) + True + >>> is_timedelta64_ns_dtype(np.dtype('m8[ps]')) # Wrong frequency + False + >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]')) + True + >>> is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64)) + False + """ + return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE) + + +# This exists to silence numpy deprecation warnings, see GH#29553 +def is_numeric_v_string_like(a: ArrayLike, b) -> bool: + """ + Check if we are comparing a string-like object to a numeric ndarray. + NumPy doesn't like to compare such objects, especially numeric arrays + and scalar string-likes. 
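# Sketch contrasting the two datetime checks above: is_datetime64_any_dtype accepts
# any datetime64 unit (and tz-aware dtypes), while is_datetime64_ns_dtype is pinned
# to nanosecond resolution.
import numpy as np
from pandas.api.types import is_datetime64_any_dtype, is_datetime64_ns_dtype

print(is_datetime64_any_dtype(np.dtype("datetime64[s]")))  # True
print(is_datetime64_ns_dtype(np.dtype("datetime64[s]")))   # False (wrong unit)
print(is_datetime64_ns_dtype(np.dtype("datetime64[ns]")))  # True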
+ + Parameters + ---------- + a : array-like, scalar + The first object to check. + b : array-like, scalar + The second object to check. + + Returns + ------- + boolean + Whether we return a comparing a string-like object to a numeric array. + + Examples + -------- + >>> is_numeric_v_string_like(np.array([1]), "foo") + True + >>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"])) + True + >>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2])) + True + >>> is_numeric_v_string_like(np.array([1]), np.array([2])) + False + >>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"])) + False + """ + is_a_array = isinstance(a, np.ndarray) + is_b_array = isinstance(b, np.ndarray) + + is_a_numeric_array = is_a_array and a.dtype.kind in ("u", "i", "f", "c", "b") + is_b_numeric_array = is_b_array and b.dtype.kind in ("u", "i", "f", "c", "b") + is_a_string_array = is_a_array and a.dtype.kind in ("S", "U") + is_b_string_array = is_b_array and b.dtype.kind in ("S", "U") + + is_b_scalar_string_like = not is_b_array and isinstance(b, str) + + return ( + (is_a_numeric_array and is_b_scalar_string_like) + or (is_a_numeric_array and is_b_string_array) + or (is_b_numeric_array and is_a_string_array) + ) + + +def needs_i8_conversion(dtype: DtypeObj | None) -> bool: + """ + Check whether the dtype should be converted to int64. + + Dtype "needs" such a conversion if the dtype is of a datetime-like dtype + + Parameters + ---------- + dtype : np.dtype, ExtensionDtype, or None + + Returns + ------- + boolean + Whether or not the dtype should be converted to int64. + + Examples + -------- + >>> needs_i8_conversion(str) + False + >>> needs_i8_conversion(np.int64) + False + >>> needs_i8_conversion(np.datetime64) + False + >>> needs_i8_conversion(np.dtype(np.datetime64)) + True + >>> needs_i8_conversion(np.array(['a', 'b'])) + False + >>> needs_i8_conversion(pd.Series([1, 2])) + False + >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) + False + >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) + False + >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern").dtype) + True + """ + if isinstance(dtype, np.dtype): + return dtype.kind in "mM" + return isinstance(dtype, (PeriodDtype, DatetimeTZDtype)) + + +def is_numeric_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a numeric dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a numeric dtype. + + Examples + -------- + >>> from pandas.api.types import is_numeric_dtype + >>> is_numeric_dtype(str) + False + >>> is_numeric_dtype(int) + True + >>> is_numeric_dtype(float) + True + >>> is_numeric_dtype(np.uint64) + True + >>> is_numeric_dtype(np.datetime64) + False + >>> is_numeric_dtype(np.timedelta64) + False + >>> is_numeric_dtype(np.array(['a', 'b'])) + False + >>> is_numeric_dtype(pd.Series([1, 2])) + True + >>> is_numeric_dtype(pd.Index([1, 2.])) + True + >>> is_numeric_dtype(np.array([], dtype=np.timedelta64)) + False + """ + return _is_dtype_type( + arr_or_dtype, _classes_and_not_datetimelike(np.number, np.bool_) + ) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric + ) + + +def is_any_real_numeric_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a real number dtype. 
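# Sketch of what needs_i8_conversion (above) signals: datetime64/timedelta64 data,
# as well as tz-aware and period extension dtypes, are backed by int64 values that
# algorithms can operate on directly. needs_i8_conversion is an internal helper,
# imported here only for illustration; .asi8 exposes the underlying int64 data.
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import needs_i8_conversion

dti = pd.date_range("2021-01-01", periods=2, tz="UTC")
print(needs_i8_conversion(dti.dtype))       # True  (DatetimeTZDtype)
print(needs_i8_conversion(np.dtype("f8")))  # False
print(dti.asi8)                             # int64 nanoseconds since the epoch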
+ + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a real number dtype. + + Examples + -------- + >>> from pandas.api.types import is_any_real_numeric_dtype + >>> is_any_real_numeric_dtype(int) + True + >>> is_any_real_numeric_dtype(float) + True + >>> is_any_real_numeric_dtype(object) + False + >>> is_any_real_numeric_dtype(str) + False + >>> is_any_real_numeric_dtype(complex(1, 2)) + False + >>> is_any_real_numeric_dtype(bool) + False + """ + return ( + is_numeric_dtype(arr_or_dtype) + and not is_complex_dtype(arr_or_dtype) + and not is_bool_dtype(arr_or_dtype) + ) + + +def is_float_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a float dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a float dtype. + + Examples + -------- + >>> from pandas.api.types import is_float_dtype + >>> is_float_dtype(str) + False + >>> is_float_dtype(int) + False + >>> is_float_dtype(float) + True + >>> is_float_dtype(np.array(['a', 'b'])) + False + >>> is_float_dtype(pd.Series([1, 2])) + False + >>> is_float_dtype(pd.Index([1, 2.])) + True + """ + return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype( + arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in "f" + ) + + +def is_bool_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a boolean dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a boolean dtype. + + Notes + ----- + An ExtensionArray is considered boolean when the ``_is_boolean`` + attribute is set to True. + + Examples + -------- + >>> from pandas.api.types import is_bool_dtype + >>> is_bool_dtype(str) + False + >>> is_bool_dtype(int) + False + >>> is_bool_dtype(bool) + True + >>> is_bool_dtype(np.bool_) + True + >>> is_bool_dtype(np.array(['a', 'b'])) + False + >>> is_bool_dtype(pd.Series([1, 2])) + False + >>> is_bool_dtype(np.array([True, False])) + True + >>> is_bool_dtype(pd.Categorical([True, False])) + True + >>> is_bool_dtype(pd.arrays.SparseArray([True, False])) + True + """ + if arr_or_dtype is None: + return False + try: + dtype = _get_dtype(arr_or_dtype) + except (TypeError, ValueError): + return False + + if isinstance(dtype, CategoricalDtype): + arr_or_dtype = dtype.categories + # now we use the special definition for Index + + if isinstance(arr_or_dtype, ABCIndex): + # Allow Index[object] that is all-bools or Index["boolean"] + if arr_or_dtype.inferred_type == "boolean": + if not is_bool_dtype(arr_or_dtype.dtype): + # GH#52680 + warnings.warn( + "The behavior of is_bool_dtype with an object-dtype Index " + "of bool objects is deprecated. In a future version, " + "this will return False. Cast the Index to a bool dtype instead.", + DeprecationWarning, + stacklevel=2, + ) + return True + return False + elif isinstance(dtype, ExtensionDtype): + return getattr(dtype, "_is_boolean", False) + + return issubclass(dtype.type, np.bool_) + + +def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool: + """ + Analogue to is_extension_array_dtype but excluding DatetimeTZDtype. 
+ """ + return isinstance(dtype, ExtensionDtype) and not dtype._supports_2d + + +def is_extension_array_dtype(arr_or_dtype) -> bool: + """ + Check if an object is a pandas extension array type. + + See the :ref:`Use Guide ` for more. + + Parameters + ---------- + arr_or_dtype : object + For array-like input, the ``.dtype`` attribute will + be extracted. + + Returns + ------- + bool + Whether the `arr_or_dtype` is an extension array type. + + Notes + ----- + This checks whether an object implements the pandas extension + array interface. In pandas, this includes: + + * Categorical + * Sparse + * Interval + * Period + * DatetimeArray + * TimedeltaArray + + Third-party libraries may implement arrays or types satisfying + this interface as well. + + Examples + -------- + >>> from pandas.api.types import is_extension_array_dtype + >>> arr = pd.Categorical(['a', 'b']) + >>> is_extension_array_dtype(arr) + True + >>> is_extension_array_dtype(arr.dtype) + True + + >>> arr = np.array(['a', 'b']) + >>> is_extension_array_dtype(arr.dtype) + False + """ + dtype = getattr(arr_or_dtype, "dtype", arr_or_dtype) + if isinstance(dtype, ExtensionDtype): + return True + elif isinstance(dtype, np.dtype): + return False + else: + return registry.find(dtype) is not None + + +def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool: + """ + Check for ExtensionDtype, datetime64 dtype, or timedelta64 dtype. + + Notes + ----- + Checks only for dtype objects, not dtype-castable strings or types. + """ + return isinstance(dtype, ExtensionDtype) or (lib.is_np_dtype(dtype, "mM")) + + +def is_complex_dtype(arr_or_dtype) -> bool: + """ + Check whether the provided array or dtype is of a complex dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array or dtype to check. + + Returns + ------- + boolean + Whether or not the array or dtype is of a complex dtype. + + Examples + -------- + >>> from pandas.api.types import is_complex_dtype + >>> is_complex_dtype(str) + False + >>> is_complex_dtype(int) + False + >>> is_complex_dtype(np.complex128) + True + >>> is_complex_dtype(np.array(['a', 'b'])) + False + >>> is_complex_dtype(pd.Series([1, 2])) + False + >>> is_complex_dtype(np.array([1 + 1j, 5])) + True + """ + return _is_dtype_type(arr_or_dtype, classes(np.complexfloating)) + + +def _is_dtype(arr_or_dtype, condition) -> bool: + """ + Return true if the condition is satisfied for the arr_or_dtype. + + Parameters + ---------- + arr_or_dtype : array-like, str, np.dtype, or ExtensionArrayType + The array-like or dtype object whose dtype we want to extract. + condition : callable[Union[np.dtype, ExtensionDtype]] + + Returns + ------- + bool + + """ + if arr_or_dtype is None: + return False + try: + dtype = _get_dtype(arr_or_dtype) + except (TypeError, ValueError): + return False + return condition(dtype) + + +def _get_dtype(arr_or_dtype) -> DtypeObj: + """ + Get the dtype instance associated with an array + or dtype object. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype object whose dtype we want to extract. + + Returns + ------- + obj_dtype : The extract dtype instance from the + passed in array or dtype object. + + Raises + ------ + TypeError : The passed in object is None. 
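# Sketch of the registry fall-through in is_extension_array_dtype above: plain
# strings are resolved through the dtype registry, so registered extension names
# are recognized while NumPy dtype strings are not.
from pandas.api.types import is_extension_array_dtype

print(is_extension_array_dtype("Int64"))     # True  - registered nullable integer dtype
print(is_extension_array_dtype("category"))  # True  - CategoricalDtype
print(is_extension_array_dtype("int64"))     # False - resolves to a plain NumPy dtype name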
+ """ + if arr_or_dtype is None: + raise TypeError("Cannot deduce dtype from null object") + + # fastpath + if isinstance(arr_or_dtype, np.dtype): + return arr_or_dtype + elif isinstance(arr_or_dtype, type): + return np.dtype(arr_or_dtype) + + # if we have an array-like + elif hasattr(arr_or_dtype, "dtype"): + arr_or_dtype = arr_or_dtype.dtype + + return pandas_dtype(arr_or_dtype) + + +def _is_dtype_type(arr_or_dtype, condition) -> bool: + """ + Return true if the condition is satisfied for the arr_or_dtype. + + Parameters + ---------- + arr_or_dtype : array-like or dtype + The array-like or dtype object whose dtype we want to extract. + condition : callable[Union[np.dtype, ExtensionDtypeType]] + + Returns + ------- + bool : if the condition is satisfied for the arr_or_dtype + """ + if arr_or_dtype is None: + return condition(type(None)) + + # fastpath + if isinstance(arr_or_dtype, np.dtype): + return condition(arr_or_dtype.type) + elif isinstance(arr_or_dtype, type): + if issubclass(arr_or_dtype, ExtensionDtype): + arr_or_dtype = arr_or_dtype.type + return condition(np.dtype(arr_or_dtype).type) + + # if we have an array-like + if hasattr(arr_or_dtype, "dtype"): + arr_or_dtype = arr_or_dtype.dtype + + # we are not possibly a dtype + elif is_list_like(arr_or_dtype): + return condition(type(None)) + + try: + tipo = pandas_dtype(arr_or_dtype).type + except (TypeError, ValueError): + if is_scalar(arr_or_dtype): + return condition(type(None)) + + return False + + return condition(tipo) + + +def infer_dtype_from_object(dtype) -> type: + """ + Get a numpy dtype.type-style object for a dtype object. + + This methods also includes handling of the datetime64[ns] and + datetime64[ns, TZ] objects. + + If no dtype can be found, we return ``object``. + + Parameters + ---------- + dtype : dtype, type + The dtype object whose numpy dtype.type-style + object we want to extract. + + Returns + ------- + type + """ + if isinstance(dtype, type) and issubclass(dtype, np.generic): + # Type object from a dtype + + return dtype + elif isinstance(dtype, (np.dtype, ExtensionDtype)): + # dtype object + try: + _validate_date_like_dtype(dtype) + except TypeError: + # Should still pass if we don't have a date-like + pass + if hasattr(dtype, "numpy_dtype"): + # TODO: Implement this properly + # https://github.com/pandas-dev/pandas/issues/52576 + return dtype.numpy_dtype.type + return dtype.type + + try: + dtype = pandas_dtype(dtype) + except TypeError: + pass + + if isinstance(dtype, ExtensionDtype): + return dtype.type + elif isinstance(dtype, str): + # TODO(jreback) + # should deprecate these + if dtype in ["datetimetz", "datetime64tz"]: + return DatetimeTZDtype.type + elif dtype in ["period"]: + raise NotImplementedError + + if dtype in ["datetime", "timedelta"]: + dtype += "64" + try: + return infer_dtype_from_object(getattr(np, dtype)) + except (AttributeError, TypeError): + # Handles cases like _get_dtype(int) i.e., + # Python objects that are valid dtypes + # (unlike user-defined types, in general) + # + # TypeError handles the float16 type code of 'e' + # further handle internal types + pass + + return infer_dtype_from_object(np.dtype(dtype)) + + +def _validate_date_like_dtype(dtype) -> None: + """ + Check whether the dtype is a date-like dtype. Raises an error if invalid. + + Parameters + ---------- + dtype : dtype, type + The dtype to check. + + Raises + ------ + TypeError : The dtype could not be casted to a date-like dtype. + ValueError : The dtype is an illegal date-like dtype (e.g. 
the + frequency provided is too specific) + """ + try: + typ = np.datetime_data(dtype)[0] + except ValueError as e: + raise TypeError(e) from e + if typ not in ["generic", "ns"]: + raise ValueError( + f"{repr(dtype.name)} is too specific of a frequency, " + f"try passing {repr(dtype.type.__name__)}" + ) + + +def validate_all_hashable(*args, error_name: str | None = None) -> None: + """ + Return None if all args are hashable, else raise a TypeError. + + Parameters + ---------- + *args + Arguments to validate. + error_name : str, optional + The name to use if error + + Raises + ------ + TypeError : If an argument is not hashable + + Returns + ------- + None + """ + if not all(is_hashable(arg) for arg in args): + if error_name: + raise TypeError(f"{error_name} must be a hashable type") + raise TypeError("All elements must be hashable") + + +def pandas_dtype(dtype) -> DtypeObj: + """ + Convert input into a pandas only dtype object or a numpy dtype object. + + Parameters + ---------- + dtype : object to be converted + + Returns + ------- + np.dtype or a pandas dtype + + Raises + ------ + TypeError if not a dtype + + Examples + -------- + >>> pd.api.types.pandas_dtype(int) + dtype('int64') + """ + # short-circuit + if isinstance(dtype, np.ndarray): + return dtype.dtype + elif isinstance(dtype, (np.dtype, ExtensionDtype)): + return dtype + + # registered extension types + result = registry.find(dtype) + if result is not None: + if isinstance(result, type): + # GH 31356, GH 54592 + warnings.warn( + f"Instantiating {result.__name__} without any arguments." + f"Pass a {result.__name__} instance to silence this warning.", + UserWarning, + stacklevel=find_stack_level(), + ) + result = result() + return result + + # try a numpy dtype + # raise a consistent TypeError if failed + try: + with warnings.catch_warnings(): + # GH#51523 - Series.astype(np.integer) doesn't show + # numpy deprecation warning of np.integer + # Hence enabling DeprecationWarning + warnings.simplefilter("always", DeprecationWarning) + npdtype = np.dtype(dtype) + except SyntaxError as err: + # np.dtype uses `eval` which can raise SyntaxError + raise TypeError(f"data type '{dtype}' not understood") from err + + # Any invalid dtype (such as pd.Timestamp) should raise an error. + # np.dtype(invalid_type).kind = 0 for such objects. However, this will + # also catch some valid dtypes such as object, np.object_ and 'object' + # which we safeguard against by catching them earlier and returning + # np.dtype(valid_dtype) before this condition is evaluated. + if is_hashable(dtype) and dtype in [ + object, + np.object_, + "object", + "O", + "object_", + ]: + # check hashability to avoid errors/DeprecationWarning when we get + # here and `dtype` is an array + return npdtype + elif npdtype.kind == "O": + raise TypeError(f"dtype '{dtype}' not understood") + + return npdtype + + +def is_all_strings(value: ArrayLike) -> bool: + """ + Check if this is an array of strings that we should try parsing. + + Includes object-dtype ndarray containing all-strings, StringArray, + and Categorical with all-string categories. + Does not include numpy string dtypes. 
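# Sketch of the resolution order implemented by pandas_dtype above: ndarrays yield
# their .dtype, existing dtype objects pass through, registered extension strings
# resolve via the registry, and everything else falls back to np.dtype, with a
# TypeError for objects that are not valid dtypes.
import numpy as np
import pandas as pd
from pandas.api.types import pandas_dtype

print(pandas_dtype(np.zeros(3)))        # float64         (ndarray -> .dtype)
print(pandas_dtype("Int64"))            # Int64           (registry hit)
print(pandas_dtype("datetime64[ns]"))   # datetime64[ns]  (NumPy fallback)
try:
    pandas_dtype(pd.Timestamp("2021-01-01"))
except TypeError as err:
    print("not a dtype:", err)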
+ """ + dtype = value.dtype + + if isinstance(dtype, np.dtype): + if len(value) == 0: + return dtype == np.dtype("object") + else: + return dtype == np.dtype("object") and lib.is_string_array( + np.asarray(value), skipna=False + ) + elif isinstance(dtype, CategoricalDtype): + return dtype.categories.inferred_type == "string" + return dtype == "string" + + +__all__ = [ + "classes", + "DT64NS_DTYPE", + "ensure_float64", + "ensure_python_int", + "ensure_str", + "infer_dtype_from_object", + "INT64_DTYPE", + "is_1d_only_ea_dtype", + "is_all_strings", + "is_any_real_numeric_dtype", + "is_array_like", + "is_bool", + "is_bool_dtype", + "is_categorical_dtype", + "is_complex", + "is_complex_dtype", + "is_dataclass", + "is_datetime64_any_dtype", + "is_datetime64_dtype", + "is_datetime64_ns_dtype", + "is_datetime64tz_dtype", + "is_decimal", + "is_dict_like", + "is_dtype_equal", + "is_ea_or_datetimelike_dtype", + "is_extension_array_dtype", + "is_file_like", + "is_float_dtype", + "is_int64_dtype", + "is_integer_dtype", + "is_interval", + "is_interval_dtype", + "is_iterator", + "is_named_tuple", + "is_nested_list_like", + "is_number", + "is_numeric_dtype", + "is_object_dtype", + "is_period_dtype", + "is_re", + "is_re_compilable", + "is_scipy_sparse", + "is_sequence", + "is_signed_integer_dtype", + "is_sparse", + "is_string_dtype", + "is_string_or_object_np_dtype", + "is_timedelta64_dtype", + "is_timedelta64_ns_dtype", + "is_unsigned_integer_dtype", + "needs_i8_conversion", + "pandas_dtype", + "TD64NS_DTYPE", + "validate_all_hashable", +] diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/concat.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/concat.py new file mode 100644 index 0000000000000000000000000000000000000000..9ec662a6cd3520aaa49fdc96142ad1b02bb518d8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/concat.py @@ -0,0 +1,348 @@ +""" +Utility functions related to concat. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.astype import astype_array +from pandas.core.dtypes.cast import ( + common_dtype_categorical_compat, + find_common_type, + np_find_common_type, +) +from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.generic import ( + ABCCategoricalIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + ) + + from pandas.core.arrays import ( + Categorical, + ExtensionArray, + ) + + +def _is_nonempty(x, axis) -> bool: + # filter empty arrays + # 1-d dtypes always are included here + if x.ndim <= axis: + return True + return x.shape[axis] > 0 + + +def concat_compat( + to_concat: Sequence[ArrayLike], axis: AxisInt = 0, ea_compat_axis: bool = False +) -> ArrayLike: + """ + provide concatenation of an array of arrays each of which is a single + 'normalized' dtypes (in that for example, if it's object, then it is a + non-datetimelike and provide a combined dtype for the resulting array that + preserves the overall dtype if possible) + + Parameters + ---------- + to_concat : sequence of arrays + axis : axis to provide concatenation + ea_compat_axis : bool, default False + For ExtensionArray compat, behave as if axis == 1 when determining + whether to drop empty arrays. 
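# Sketch exercising concat_compat as documented above (an internal helper in
# pandas.core.dtypes.concat; the import path and behaviour may change): mismatched
# dtypes are cast to a common dtype before concatenation, and mixing bools with
# integers falls back to object rather than silently upcasting the bools.
import numpy as np
from pandas.core.dtypes.concat import concat_compat

print(concat_compat([np.array([1, 2]), np.array([3.5])]).dtype)   # float64
print(concat_compat([np.array([True]), np.array([1, 2])]).dtype)  # object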
+ + Returns + ------- + a single array, preserving the combined dtypes + """ + if len(to_concat) and lib.dtypes_all_equal([obj.dtype for obj in to_concat]): + # fastpath! + obj = to_concat[0] + if isinstance(obj, np.ndarray): + to_concat_arrs = cast("Sequence[np.ndarray]", to_concat) + return np.concatenate(to_concat_arrs, axis=axis) + + to_concat_eas = cast("Sequence[ExtensionArray]", to_concat) + if ea_compat_axis: + # We have 1D objects, that don't support axis keyword + return obj._concat_same_type(to_concat_eas) + elif axis == 0: + return obj._concat_same_type(to_concat_eas) + else: + # e.g. DatetimeArray + # NB: We are assuming here that ensure_wrapped_if_arraylike has + # been called where relevant. + return obj._concat_same_type( + # error: Unexpected keyword argument "axis" for "_concat_same_type" + # of "ExtensionArray" + to_concat_eas, + axis=axis, # type: ignore[call-arg] + ) + + # If all arrays are empty, there's nothing to convert, just short-cut to + # the concatenation, #3121. + # + # Creating an empty array directly is tempting, but the winnings would be + # marginal given that it would still require shape & dtype calculation and + # np.concatenate which has them both implemented is compiled. + orig = to_concat + non_empties = [x for x in to_concat if _is_nonempty(x, axis)] + if non_empties and axis == 0 and not ea_compat_axis: + # ea_compat_axis see GH#39574 + to_concat = non_empties + + any_ea, kinds, target_dtype = _get_result_dtype(to_concat, non_empties) + + if len(to_concat) < len(orig): + _, _, alt_dtype = _get_result_dtype(orig, non_empties) + if alt_dtype != target_dtype: + # GH#39122 + warnings.warn( + "The behavior of array concatenation with empty entries is " + "deprecated. In a future version, this will no longer exclude " + "empty items when determining the result dtype. " + "To retain the old behavior, exclude the empty entries before " + "the concat operation.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if target_dtype is not None: + to_concat = [astype_array(arr, target_dtype, copy=False) for arr in to_concat] + + if not isinstance(to_concat[0], np.ndarray): + # i.e. isinstance(to_concat[0], ExtensionArray) + to_concat_eas = cast("Sequence[ExtensionArray]", to_concat) + cls = type(to_concat[0]) + # GH#53640: eg. for datetime array, axis=1 but 0 is default + # However, class method `_concat_same_type()` for some classes + # may not support the `axis` keyword + if ea_compat_axis or axis == 0: + return cls._concat_same_type(to_concat_eas) + else: + return cls._concat_same_type( + to_concat_eas, + axis=axis, # type: ignore[call-arg] + ) + else: + to_concat_arrs = cast("Sequence[np.ndarray]", to_concat) + result = np.concatenate(to_concat_arrs, axis=axis) + + if not any_ea and "b" in kinds and result.dtype.kind in "iuf": + # GH#39817 cast to object instead of casting bools to numeric + result = result.astype(object, copy=False) + return result + + +def _get_result_dtype( + to_concat: Sequence[ArrayLike], non_empties: Sequence[ArrayLike] +) -> tuple[bool, set[str], DtypeObj | None]: + target_dtype = None + + dtypes = {obj.dtype for obj in to_concat} + kinds = {obj.dtype.kind for obj in to_concat} + + any_ea = any(not isinstance(x, np.ndarray) for x in to_concat) + if any_ea: + # i.e. 
any ExtensionArrays + + # we ignore axis here, as internally concatting with EAs is always + # for axis=0 + if len(dtypes) != 1: + target_dtype = find_common_type([x.dtype for x in to_concat]) + target_dtype = common_dtype_categorical_compat(to_concat, target_dtype) + + elif not len(non_empties): + # we have all empties, but may need to coerce the result dtype to + # object if we have non-numeric type operands (numpy would otherwise + # cast this to float) + if len(kinds) != 1: + if not len(kinds - {"i", "u", "f"}) or not len(kinds - {"b", "i", "u"}): + # let numpy coerce + pass + else: + # coerce to object + target_dtype = np.dtype(object) + kinds = {"o"} + else: + # error: Argument 1 to "np_find_common_type" has incompatible type + # "*Set[Union[ExtensionDtype, Any]]"; expected "dtype[Any]" + target_dtype = np_find_common_type(*dtypes) # type: ignore[arg-type] + + return any_ea, kinds, target_dtype + + +def union_categoricals( + to_union, sort_categories: bool = False, ignore_order: bool = False +) -> Categorical: + """ + Combine list-like of Categorical-like, unioning categories. + + All categories must have the same dtype. + + Parameters + ---------- + to_union : list-like + Categorical, CategoricalIndex, or Series with dtype='category'. + sort_categories : bool, default False + If true, resulting categories will be lexsorted, otherwise + they will be ordered as they appear in the data. + ignore_order : bool, default False + If true, the ordered attribute of the Categoricals will be ignored. + Results in an unordered categorical. + + Returns + ------- + Categorical + + Raises + ------ + TypeError + - all inputs do not have the same dtype + - all inputs do not have the same ordered property + - all inputs are ordered and their categories are not identical + - sort_categories=True and Categoricals are ordered + ValueError + Empty list of categoricals passed + + Notes + ----- + To learn more about categories, see `link + `__ + + Examples + -------- + If you want to combine categoricals that do not necessarily have + the same categories, `union_categoricals` will combine a list-like + of categoricals. The new categories will be the union of the + categories being combined. + + >>> a = pd.Categorical(["b", "c"]) + >>> b = pd.Categorical(["a", "b"]) + >>> pd.api.types.union_categoricals([a, b]) + ['b', 'c', 'a', 'b'] + Categories (3, object): ['b', 'c', 'a'] + + By default, the resulting categories will be ordered as they appear + in the `categories` of the data. If you want the categories to be + lexsorted, use `sort_categories=True` argument. + + >>> pd.api.types.union_categoricals([a, b], sort_categories=True) + ['b', 'c', 'a', 'b'] + Categories (3, object): ['a', 'b', 'c'] + + `union_categoricals` also works with the case of combining two + categoricals of the same categories and order information (e.g. what + you could also `append` for). + + >>> a = pd.Categorical(["a", "b"], ordered=True) + >>> b = pd.Categorical(["a", "b", "a"], ordered=True) + >>> pd.api.types.union_categoricals([a, b]) + ['a', 'b', 'a', 'b', 'a'] + Categories (2, object): ['a' < 'b'] + + Raises `TypeError` because the categories are ordered and not identical. + + >>> a = pd.Categorical(["a", "b"], ordered=True) + >>> b = pd.Categorical(["a", "b", "c"], ordered=True) + >>> pd.api.types.union_categoricals([a, b]) + Traceback (most recent call last): + ... 
+ TypeError: to union ordered Categoricals, all categories must be the same + + Ordered categoricals with different categories or orderings can be + combined by using the `ignore_ordered=True` argument. + + >>> a = pd.Categorical(["a", "b", "c"], ordered=True) + >>> b = pd.Categorical(["c", "b", "a"], ordered=True) + >>> pd.api.types.union_categoricals([a, b], ignore_order=True) + ['a', 'b', 'c', 'c', 'b', 'a'] + Categories (3, object): ['a', 'b', 'c'] + + `union_categoricals` also works with a `CategoricalIndex`, or `Series` + containing categorical data, but note that the resulting array will + always be a plain `Categorical` + + >>> a = pd.Series(["b", "c"], dtype='category') + >>> b = pd.Series(["a", "b"], dtype='category') + >>> pd.api.types.union_categoricals([a, b]) + ['b', 'c', 'a', 'b'] + Categories (3, object): ['b', 'c', 'a'] + """ + from pandas import Categorical + from pandas.core.arrays.categorical import recode_for_categories + + if len(to_union) == 0: + raise ValueError("No Categoricals to union") + + def _maybe_unwrap(x): + if isinstance(x, (ABCCategoricalIndex, ABCSeries)): + return x._values + elif isinstance(x, Categorical): + return x + else: + raise TypeError("all components to combine must be Categorical") + + to_union = [_maybe_unwrap(x) for x in to_union] + first = to_union[0] + + if not lib.dtypes_all_equal([obj.categories.dtype for obj in to_union]): + raise TypeError("dtype of categories must be the same") + + ordered = False + if all(first._categories_match_up_to_permutation(other) for other in to_union[1:]): + # identical categories - fastpath + categories = first.categories + ordered = first.ordered + + all_codes = [first._encode_with_my_categories(x)._codes for x in to_union] + new_codes = np.concatenate(all_codes) + + if sort_categories and not ignore_order and ordered: + raise TypeError("Cannot use sort_categories=True with ordered Categoricals") + + if sort_categories and not categories.is_monotonic_increasing: + categories = categories.sort_values() + indexer = categories.get_indexer(first.categories) + + from pandas.core.algorithms import take_nd + + new_codes = take_nd(indexer, new_codes, fill_value=-1) + elif ignore_order or all(not c.ordered for c in to_union): + # different categories - union and recode + cats = first.categories.append([c.categories for c in to_union[1:]]) + categories = cats.unique() + if sort_categories: + categories = categories.sort_values() + + new_codes = [ + recode_for_categories(c.codes, c.categories, categories) for c in to_union + ] + new_codes = np.concatenate(new_codes) + else: + # ordered - to show a proper error message + if all(c.ordered for c in to_union): + msg = "to union ordered Categoricals, all categories must be the same" + raise TypeError(msg) + raise TypeError("Categorical.ordered must be the same") + + if ignore_order: + ordered = False + + dtype = CategoricalDtype(categories=categories, ordered=ordered) + return Categorical._simple_new(new_codes, dtype=dtype) diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..1c43ef55c11d792df6cbfc4b249caf8889c7af58 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/dtypes.py @@ -0,0 +1,2348 @@ +""" +Define extension dtypes. 
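# Sketch of the "different categories - union and recode" branch above: each input's
# codes are re-expressed against the unioned (optionally sorted) categories before
# concatenation. Uses the public union_categoricals entry point.
import pandas as pd
from pandas.api.types import union_categoricals

a = pd.Categorical(["b", "c"])
b = pd.Categorical(["a", "b"])
out = union_categoricals([a, b], sort_categories=True)
print(out.categories.tolist())  # ['a', 'b', 'c']
print(out.codes.tolist())       # [1, 2, 0, 1]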
+""" +from __future__ import annotations + +from datetime import ( + date, + datetime, + time, + timedelta, +) +from decimal import Decimal +import re +from typing import ( + TYPE_CHECKING, + Any, + cast, +) +import warnings + +import numpy as np +import pytz + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas._libs.interval import Interval +from pandas._libs.properties import cache_readonly +from pandas._libs.tslibs import ( + BaseOffset, + NaT, + NaTType, + Period, + Timedelta, + Timestamp, + timezones, + to_offset, + tz_compare, +) +from pandas._libs.tslibs.dtypes import ( + PeriodDtypeBase, + abbrev_to_npy_unit, +) +from pandas._libs.tslibs.offsets import BDay +from pandas.compat import pa_version_under10p1 +from pandas.errors import PerformanceWarning +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.base import ( + ExtensionDtype, + StorageExtensionDtype, + register_extension_dtype, +) +from pandas.core.dtypes.generic import ( + ABCCategoricalIndex, + ABCIndex, + ABCRangeIndex, +) +from pandas.core.dtypes.inference import ( + is_bool, + is_list_like, +) + +from pandas.util import capitalize_first_letter + +if not pa_version_under10p1: + import pyarrow as pa + +if TYPE_CHECKING: + from collections.abc import MutableMapping + from datetime import tzinfo + + import pyarrow as pa # noqa: TCH004 + + from pandas._typing import ( + Dtype, + DtypeObj, + IntervalClosedType, + Ordered, + Self, + npt, + type_t, + ) + + from pandas import ( + Categorical, + CategoricalIndex, + DatetimeIndex, + Index, + IntervalIndex, + PeriodIndex, + ) + from pandas.core.arrays import ( + BaseMaskedArray, + DatetimeArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + SparseArray, + ) + from pandas.core.arrays.arrow import ArrowExtensionArray + +str_type = str + + +class PandasExtensionDtype(ExtensionDtype): + """ + A np.dtype duck-typed class, suitable for holding a custom dtype. + + THIS IS NOT A REAL NUMPY DTYPE + """ + + type: Any + kind: Any + # The Any type annotations above are here only because mypy seems to have a + # problem dealing with multiple inheritance from PandasExtensionDtype + # and ExtensionDtype's @properties in the subclasses below. The kind and + # type variables in those subclasses are explicitly typed below. + subdtype = None + str: str_type + num = 100 + shape: tuple[int, ...] = () + itemsize = 8 + base: DtypeObj | None = None + isbuiltin = 0 + isnative = 0 + _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} + + def __repr__(self) -> str_type: + """ + Return a string representation for a particular object. + """ + return str(self) + + def __hash__(self) -> int: + raise NotImplementedError("sub-classes should implement an __hash__ method") + + def __getstate__(self) -> dict[str_type, Any]: + # pickle support; we don't want to pickle the cache + return {k: getattr(self, k, None) for k in self._metadata} + + @classmethod + def reset_cache(cls) -> None: + """clear the cache""" + cls._cache_dtypes = {} + + +class CategoricalDtypeType(type): + """ + the type of CategoricalDtype, this metaclass determines subclass ability + """ + + +@register_extension_dtype +class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): + """ + Type for categorical data with the categories and orderedness. + + Parameters + ---------- + categories : sequence, optional + Must be unique, and must not contain any nulls. + The categories are stored in an Index, + and if an index is provided the dtype of that index will be used. 
+ ordered : bool or None, default False + Whether or not this categorical is treated as a ordered categorical. + None can be used to maintain the ordered value of existing categoricals when + used in operations that combine categoricals, e.g. astype, and will resolve to + False if there is no existing ordered to maintain. + + Attributes + ---------- + categories + ordered + + Methods + ------- + None + + See Also + -------- + Categorical : Represent a categorical variable in classic R / S-plus fashion. + + Notes + ----- + This class is useful for specifying the type of a ``Categorical`` + independent of the values. See :ref:`categorical.categoricaldtype` + for more. + + Examples + -------- + >>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True) + >>> pd.Series(['a', 'b', 'a', 'c'], dtype=t) + 0 a + 1 b + 2 a + 3 NaN + dtype: category + Categories (2, object): ['b' < 'a'] + + An empty CategoricalDtype with a specific dtype can be created + by providing an empty index. As follows, + + >>> pd.CategoricalDtype(pd.DatetimeIndex([])).categories.dtype + dtype(' None: + self._finalize(categories, ordered, fastpath=False) + + @classmethod + def _from_fastpath( + cls, categories=None, ordered: bool | None = None + ) -> CategoricalDtype: + self = cls.__new__(cls) + self._finalize(categories, ordered, fastpath=True) + return self + + @classmethod + def _from_categorical_dtype( + cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None = None + ) -> CategoricalDtype: + if categories is ordered is None: + return dtype + if categories is None: + categories = dtype.categories + if ordered is None: + ordered = dtype.ordered + return cls(categories, ordered) + + @classmethod + def _from_values_or_dtype( + cls, + values=None, + categories=None, + ordered: bool | None = None, + dtype: Dtype | None = None, + ) -> CategoricalDtype: + """ + Construct dtype from the input parameters used in :class:`Categorical`. + + This constructor method specifically does not do the factorization + step, if that is needed to find the categories. This constructor may + therefore return ``CategoricalDtype(categories=None, ordered=None)``, + which may not be useful. Additional steps may therefore have to be + taken to create the final dtype. + + The return dtype is specified from the inputs in this prioritized + order: + 1. if dtype is a CategoricalDtype, return dtype + 2. if dtype is the string 'category', create a CategoricalDtype from + the supplied categories and ordered parameters, and return that. + 3. if values is a categorical, use value.dtype, but override it with + categories and ordered if either/both of those are not None. + 4. if dtype is None and values is not a categorical, construct the + dtype from categories and ordered, even if either of those is None. + + Parameters + ---------- + values : list-like, optional + The list-like must be 1-dimensional. + categories : list-like, optional + Categories for the CategoricalDtype. + ordered : bool, optional + Designating if the categories are ordered. + dtype : CategoricalDtype or the string "category", optional + If ``CategoricalDtype``, cannot be used together with + `categories` or `ordered`. + + Returns + ------- + CategoricalDtype + + Examples + -------- + >>> pd.CategoricalDtype._from_values_or_dtype() + CategoricalDtype(categories=None, ordered=None, categories_dtype=None) + >>> pd.CategoricalDtype._from_values_or_dtype( + ... categories=['a', 'b'], ordered=True + ... 
) + CategoricalDtype(categories=['a', 'b'], ordered=True, categories_dtype=object) + >>> dtype1 = pd.CategoricalDtype(['a', 'b'], ordered=True) + >>> dtype2 = pd.CategoricalDtype(['x', 'y'], ordered=False) + >>> c = pd.Categorical([0, 1], dtype=dtype1) + >>> pd.CategoricalDtype._from_values_or_dtype( + ... c, ['x', 'y'], ordered=True, dtype=dtype2 + ... ) + Traceback (most recent call last): + ... + ValueError: Cannot specify `categories` or `ordered` together with + `dtype`. + + The supplied dtype takes precedence over values' dtype: + + >>> pd.CategoricalDtype._from_values_or_dtype(c, dtype=dtype2) + CategoricalDtype(categories=['x', 'y'], ordered=False, categories_dtype=object) + """ + + if dtype is not None: + # The dtype argument takes precedence over values.dtype (if any) + if isinstance(dtype, str): + if dtype == "category": + if ordered is None and cls.is_dtype(values): + # GH#49309 preserve orderedness + ordered = values.dtype.ordered + + dtype = CategoricalDtype(categories, ordered) + else: + raise ValueError(f"Unknown dtype {repr(dtype)}") + elif categories is not None or ordered is not None: + raise ValueError( + "Cannot specify `categories` or `ordered` together with `dtype`." + ) + elif not isinstance(dtype, CategoricalDtype): + raise ValueError(f"Cannot not construct CategoricalDtype from {dtype}") + elif cls.is_dtype(values): + # If no "dtype" was passed, use the one from "values", but honor + # the "ordered" and "categories" arguments + dtype = values.dtype._from_categorical_dtype( + values.dtype, categories, ordered + ) + else: + # If dtype=None and values is not categorical, create a new dtype. + # Note: This could potentially have categories=None and + # ordered=None. + dtype = CategoricalDtype(categories, ordered) + + return cast(CategoricalDtype, dtype) + + @classmethod + def construct_from_string(cls, string: str_type) -> CategoricalDtype: + """ + Construct a CategoricalDtype from a string. + + Parameters + ---------- + string : str + Must be the string "category" in order to be successfully constructed. + + Returns + ------- + CategoricalDtype + Instance of the dtype. + + Raises + ------ + TypeError + If a CategoricalDtype cannot be constructed from the input. + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + if string != cls.name: + raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'") + + # need ordered=None to ensure that operations specifying dtype="category" don't + # override the ordered value for existing categoricals + return cls(ordered=None) + + def _finalize(self, categories, ordered: Ordered, fastpath: bool = False) -> None: + if ordered is not None: + self.validate_ordered(ordered) + + if categories is not None: + categories = self.validate_categories(categories, fastpath=fastpath) + + self._categories = categories + self._ordered = ordered + + def __setstate__(self, state: MutableMapping[str_type, Any]) -> None: + # for pickle compat. 
__get_state__ is defined in the + # PandasExtensionDtype superclass and uses the public properties to + # pickle -> need to set the settable private ones here (see GH26067) + self._categories = state.pop("categories", None) + self._ordered = state.pop("ordered", False) + + def __hash__(self) -> int: + # _hash_categories returns a uint64, so use the negative + # space for when we have unknown categories to avoid a conflict + if self.categories is None: + if self.ordered: + return -1 + else: + return -2 + # We *do* want to include the real self.ordered here + return int(self._hash_categories) + + def __eq__(self, other: object) -> bool: + """ + Rules for CDT equality: + 1) Any CDT is equal to the string 'category' + 2) Any CDT is equal to itself + 3) Any CDT is equal to a CDT with categories=None regardless of ordered + 4) A CDT with ordered=True is only equal to another CDT with + ordered=True and identical categories in the same order + 5) A CDT with ordered={False, None} is only equal to another CDT with + ordered={False, None} and identical categories, but same order is + not required. There is no distinction between False/None. + 6) Any other comparison returns False + """ + if isinstance(other, str): + return other == self.name + elif other is self: + return True + elif not (hasattr(other, "ordered") and hasattr(other, "categories")): + return False + elif self.categories is None or other.categories is None: + # For non-fully-initialized dtypes, these are only equal to + # - the string "category" (handled above) + # - other CategoricalDtype with categories=None + return self.categories is other.categories + elif self.ordered or other.ordered: + # At least one has ordered=True; equal if both have ordered=True + # and the same values for categories in the same order. + return (self.ordered == other.ordered) and self.categories.equals( + other.categories + ) + else: + # Neither has ordered=True; equal if both have the same categories, + # but same order is not necessary. There is no distinction between + # ordered=False and ordered=None: CDT(., False) and CDT(., None) + # will be equal if they have the same categories. + left = self.categories + right = other.categories + + # GH#36280 the ordering of checks here is for performance + if not left.dtype == right.dtype: + return False + + if len(left) != len(right): + return False + + if self.categories.equals(other.categories): + # Check and see if they happen to be identical categories + return True + + if left.dtype != object: + # Faster than calculating hash + indexer = left.get_indexer(right) + # Because left and right have the same length and are unique, + # `indexer` not having any -1s implies that there is a + # bijection between `left` and `right`. + return (indexer != -1).all() + + # With object-dtype we need a comparison that identifies + # e.g. 
int(2) as distinct from float(2) + return set(left) == set(right) + + def __repr__(self) -> str_type: + if self.categories is None: + data = "None" + dtype = "None" + else: + data = self.categories._format_data(name=type(self).__name__) + if isinstance(self.categories, ABCRangeIndex): + data = str(self.categories._range) + data = data.rstrip(", ") + dtype = self.categories.dtype + + return ( + f"CategoricalDtype(categories={data}, ordered={self.ordered}, " + f"categories_dtype={dtype})" + ) + + @cache_readonly + def _hash_categories(self) -> int: + from pandas.core.util.hashing import ( + combine_hash_arrays, + hash_array, + hash_tuples, + ) + + categories = self.categories + ordered = self.ordered + + if len(categories) and isinstance(categories[0], tuple): + # assumes if any individual category is a tuple, then all our. ATM + # I don't really want to support just some of the categories being + # tuples. + cat_list = list(categories) # breaks if a np.array of categories + cat_array = hash_tuples(cat_list) + else: + if categories.dtype == "O" and len({type(x) for x in categories}) != 1: + # TODO: hash_array doesn't handle mixed types. It casts + # everything to a str first, which means we treat + # {'1', '2'} the same as {'1', 2} + # find a better solution + hashed = hash((tuple(categories), ordered)) + return hashed + + if DatetimeTZDtype.is_dtype(categories.dtype): + # Avoid future warning. + categories = categories.view("datetime64[ns]") + + cat_array = hash_array(np.asarray(categories), categorize=False) + if ordered: + cat_array = np.vstack( + [cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)] + ) + else: + cat_array = np.array([cat_array]) + combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) + return np.bitwise_xor.reduce(combined_hashed) + + @classmethod + def construct_array_type(cls) -> type_t[Categorical]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas import Categorical + + return Categorical + + @staticmethod + def validate_ordered(ordered: Ordered) -> None: + """ + Validates that we have a valid ordered parameter. If + it is not a boolean, a TypeError will be raised. + + Parameters + ---------- + ordered : object + The parameter to be verified. + + Raises + ------ + TypeError + If 'ordered' is not a boolean. 
+ """ + if not is_bool(ordered): + raise TypeError("'ordered' must either be 'True' or 'False'") + + @staticmethod + def validate_categories(categories, fastpath: bool = False) -> Index: + """ + Validates that we have good categories + + Parameters + ---------- + categories : array-like + fastpath : bool + Whether to skip nan and uniqueness checks + + Returns + ------- + categories : Index + """ + from pandas.core.indexes.base import Index + + if not fastpath and not is_list_like(categories): + raise TypeError( + f"Parameter 'categories' must be list-like, was {repr(categories)}" + ) + if not isinstance(categories, ABCIndex): + categories = Index._with_infer(categories, tupleize_cols=False) + + if not fastpath: + if categories.hasnans: + raise ValueError("Categorical categories cannot be null") + + if not categories.is_unique: + raise ValueError("Categorical categories must be unique") + + if isinstance(categories, ABCCategoricalIndex): + categories = categories.categories + + return categories + + def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype: + """ + Returns a CategoricalDtype with categories and ordered taken from dtype + if specified, otherwise falling back to self if unspecified + + Parameters + ---------- + dtype : CategoricalDtype + + Returns + ------- + new_dtype : CategoricalDtype + """ + if isinstance(dtype, str) and dtype == "category": + # dtype='category' should not change anything + return self + elif not self.is_dtype(dtype): + raise ValueError( + f"a CategoricalDtype must be passed to perform an update, " + f"got {repr(dtype)}" + ) + else: + # from here on, dtype is a CategoricalDtype + dtype = cast(CategoricalDtype, dtype) + + # update categories/ordered unless they've been explicitly passed as None + new_categories = ( + dtype.categories if dtype.categories is not None else self.categories + ) + new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered + + return CategoricalDtype(new_categories, new_ordered) + + @property + def categories(self) -> Index: + """ + An ``Index`` containing the unique categories allowed. + + Examples + -------- + >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True) + >>> cat_type.categories + Index(['a', 'b'], dtype='object') + """ + return self._categories + + @property + def ordered(self) -> Ordered: + """ + Whether the categories have an ordered relationship. 
+ + Examples + -------- + >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=True) + >>> cat_type.ordered + True + + >>> cat_type = pd.CategoricalDtype(categories=['a', 'b'], ordered=False) + >>> cat_type.ordered + False + """ + return self._ordered + + @property + def _is_boolean(self) -> bool: + from pandas.core.dtypes.common import is_bool_dtype + + return is_bool_dtype(self.categories) + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + # check if we have all categorical dtype with identical categories + if all(isinstance(x, CategoricalDtype) for x in dtypes): + first = dtypes[0] + if all(first == other for other in dtypes[1:]): + return first + + # special case non-initialized categorical + # TODO we should figure out the expected return value in general + non_init_cats = [ + isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes + ] + if all(non_init_cats): + return self + elif any(non_init_cats): + return None + + # categorical is aware of Sparse -> extract sparse subdtypes + dtypes = [x.subtype if isinstance(x, SparseDtype) else x for x in dtypes] + # extract the categories' dtype + non_cat_dtypes = [ + x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in dtypes + ] + # TODO should categorical always give an answer? + from pandas.core.dtypes.cast import find_common_type + + return find_common_type(non_cat_dtypes) + + @cache_readonly + def index_class(self) -> type_t[CategoricalIndex]: + from pandas import CategoricalIndex + + return CategoricalIndex + + +@register_extension_dtype +class DatetimeTZDtype(PandasExtensionDtype): + """ + An ExtensionDtype for timezone-aware datetime data. + + **This is not an actual numpy dtype**, but a duck type. + + Parameters + ---------- + unit : str, default "ns" + The precision of the datetime data. Currently limited + to ``"ns"``. + tz : str, int, or datetime.tzinfo + The timezone. + + Attributes + ---------- + unit + tz + + Methods + ------- + None + + Raises + ------ + ZoneInfoNotFoundError + When the requested timezone cannot be found. + + Examples + -------- + >>> from zoneinfo import ZoneInfo + >>> pd.DatetimeTZDtype(tz=ZoneInfo('UTC')) + datetime64[ns, UTC] + + >>> pd.DatetimeTZDtype(tz=ZoneInfo('Europe/Paris')) + datetime64[ns, Europe/Paris] + """ + + type: type[Timestamp] = Timestamp + kind: str_type = "M" + num = 101 + _metadata = ("unit", "tz") + _match = re.compile(r"(datetime64|M8)\[(?P.+), (?P.+)\]") + _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} + _supports_2d = True + _can_fast_transpose = True + + @property + def na_value(self) -> NaTType: + return NaT + + @cache_readonly + def base(self) -> DtypeObj: # type: ignore[override] + return np.dtype(f"M8[{self.unit}]") + + # error: Signature of "str" incompatible with supertype "PandasExtensionDtype" + @cache_readonly + def str(self) -> str: # type: ignore[override] + return f"|M8[{self.unit}]" + + def __init__(self, unit: str_type | DatetimeTZDtype = "ns", tz=None) -> None: + if isinstance(unit, DatetimeTZDtype): + # error: "str" has no attribute "tz" + unit, tz = unit.unit, unit.tz # type: ignore[attr-defined] + + if unit != "ns": + if isinstance(unit, str) and tz is None: + # maybe a string like datetime64[ns, tz], which we support for + # now. + result = type(self).construct_from_string(unit) + unit = result.unit + tz = result.tz + msg = ( + f"Passing a dtype alias like 'datetime64[ns, {tz}]' " + "to DatetimeTZDtype is no longer supported. 
Use " + "'DatetimeTZDtype.construct_from_string()' instead." + ) + raise ValueError(msg) + if unit not in ["s", "ms", "us", "ns"]: + raise ValueError("DatetimeTZDtype only supports s, ms, us, ns units") + + if tz: + tz = timezones.maybe_get_tz(tz) + tz = timezones.tz_standardize(tz) + elif tz is not None: + raise pytz.UnknownTimeZoneError(tz) + if tz is None: + raise TypeError("A 'tz' is required.") + + self._unit = unit + self._tz = tz + + @cache_readonly + def _creso(self) -> int: + """ + The NPY_DATETIMEUNIT corresponding to this dtype's resolution. + """ + return abbrev_to_npy_unit(self.unit) + + @property + def unit(self) -> str_type: + """ + The precision of the datetime data. + + Examples + -------- + >>> from zoneinfo import ZoneInfo + >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles')) + >>> dtype.unit + 'ns' + """ + return self._unit + + @property + def tz(self) -> tzinfo: + """ + The timezone. + + Examples + -------- + >>> from zoneinfo import ZoneInfo + >>> dtype = pd.DatetimeTZDtype(tz=ZoneInfo('America/Los_Angeles')) + >>> dtype.tz + zoneinfo.ZoneInfo(key='America/Los_Angeles') + """ + return self._tz + + @classmethod + def construct_array_type(cls) -> type_t[DatetimeArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays import DatetimeArray + + return DatetimeArray + + @classmethod + def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: + """ + Construct a DatetimeTZDtype from a string. + + Parameters + ---------- + string : str + The string alias for this DatetimeTZDtype. + Should be formatted like ``datetime64[ns, ]``, + where ```` is the timezone name. + + Examples + -------- + >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]') + datetime64[ns, UTC] + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + + msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" + match = cls._match.match(string) + if match: + d = match.groupdict() + try: + return cls(unit=d["unit"], tz=d["tz"]) + except (KeyError, TypeError, ValueError) as err: + # KeyError if maybe_get_tz tries and fails to get a + # pytz timezone (actually pytz.UnknownTimeZoneError). + # TypeError if we pass a nonsense tz; + # ValueError if we pass a unit other than "ns" + raise TypeError(msg) from err + raise TypeError(msg) + + def __str__(self) -> str_type: + return f"datetime64[{self.unit}, {self.tz}]" + + @property + def name(self) -> str_type: + """A string representation of the dtype.""" + return str(self) + + def __hash__(self) -> int: + # make myself hashable + # TODO: update this. + return hash(str(self)) + + def __eq__(self, other: object) -> bool: + if isinstance(other, str): + if other.startswith("M8["): + other = f"datetime64[{other[3:]}" + return other == self.name + + return ( + isinstance(other, DatetimeTZDtype) + and self.unit == other.unit + and tz_compare(self.tz, other.tz) + ) + + def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray: + """ + Construct DatetimeArray from pyarrow Array/ChunkedArray. + + Note: If the units in the pyarrow Array are the same as this + DatetimeDtype, then values corresponding to the integer representation + of ``NaT`` (e.g. one nanosecond before :attr:`pandas.Timestamp.min`) + are converted to ``NaT``, regardless of the null indicator in the + pyarrow array. 
+ + Parameters + ---------- + array : pyarrow.Array or pyarrow.ChunkedArray + The Arrow array to convert to DatetimeArray. + + Returns + ------- + extension array : DatetimeArray + """ + import pyarrow + + from pandas.core.arrays import DatetimeArray + + array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True) + + if isinstance(array, pyarrow.Array): + np_arr = array.to_numpy(zero_copy_only=False) + else: + np_arr = array.to_numpy() + + return DatetimeArray._simple_new(np_arr, dtype=self) + + def __setstate__(self, state) -> None: + # for pickle compat. __get_state__ is defined in the + # PandasExtensionDtype superclass and uses the public properties to + # pickle -> need to set the settable private ones here (see GH26067) + self._tz = state["tz"] + self._unit = state["unit"] + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + if all(isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes): + np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]]) + unit = np.datetime_data(np_dtype)[0] + return type(self)(unit=unit, tz=self.tz) + return super()._get_common_dtype(dtypes) + + @cache_readonly + def index_class(self) -> type_t[DatetimeIndex]: + from pandas import DatetimeIndex + + return DatetimeIndex + + +@register_extension_dtype +class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype): + """ + An ExtensionDtype for Period data. + + **This is not an actual numpy dtype**, but a duck type. + + Parameters + ---------- + freq : str or DateOffset + The frequency of this PeriodDtype. + + Attributes + ---------- + freq + + Methods + ------- + None + + Examples + -------- + >>> pd.PeriodDtype(freq='D') + period[D] + + >>> pd.PeriodDtype(freq=pd.offsets.MonthEnd()) + period[M] + """ + + type: type[Period] = Period + kind: str_type = "O" + str = "|O08" + base = np.dtype("O") + num = 102 + _metadata = ("freq",) + _match = re.compile(r"(P|p)eriod\[(?P.+)\]") + # error: Incompatible types in assignment (expression has type + # "Dict[int, PandasExtensionDtype]", base class "PandasExtensionDtype" + # defined the type as "Dict[str, PandasExtensionDtype]") [assignment] + _cache_dtypes: dict[BaseOffset, int] = {} # type: ignore[assignment] + __hash__ = PeriodDtypeBase.__hash__ + _freq: BaseOffset + _supports_2d = True + _can_fast_transpose = True + + def __new__(cls, freq) -> PeriodDtype: # noqa: PYI034 + """ + Parameters + ---------- + freq : PeriodDtype, BaseOffset, or string + """ + if isinstance(freq, PeriodDtype): + return freq + + if not isinstance(freq, BaseOffset): + freq = cls._parse_dtype_strict(freq) + + if isinstance(freq, BDay): + # GH#53446 + # TODO(3.0): enforcing this will close GH#10575 + warnings.warn( + "PeriodDtype[B] is deprecated and will be removed in a future " + "version. Use a DatetimeIndex with freq='B' instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + + try: + dtype_code = cls._cache_dtypes[freq] + except KeyError: + dtype_code = freq._period_dtype_code + cls._cache_dtypes[freq] = dtype_code + u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n) + u._freq = freq + return u + + def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]: + return type(self), (self.name,) + + @property + def freq(self) -> BaseOffset: + """ + The frequency object of this PeriodDtype. 
+ + Examples + -------- + >>> dtype = pd.PeriodDtype(freq='D') + >>> dtype.freq + + """ + return self._freq + + @classmethod + def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset: + if isinstance(freq, str): # note: freq is already of type str! + if freq.startswith(("Period[", "period[")): + m = cls._match.search(freq) + if m is not None: + freq = m.group("freq") + + freq_offset = to_offset(freq, is_period=True) + if freq_offset is not None: + return freq_offset + + raise TypeError( + "PeriodDtype argument should be string or BaseOffset, " + f"got {type(freq).__name__}" + ) + + @classmethod + def construct_from_string(cls, string: str_type) -> PeriodDtype: + """ + Strict construction from a string, raise a TypeError if not + possible + """ + if ( + isinstance(string, str) + and (string.startswith(("period[", "Period["))) + or isinstance(string, BaseOffset) + ): + # do not parse string like U as period[U] + # avoid tuple to be regarded as freq + try: + return cls(freq=string) + except ValueError: + pass + if isinstance(string, str): + msg = f"Cannot construct a 'PeriodDtype' from '{string}'" + else: + msg = f"'construct_from_string' expects a string, got {type(string)}" + raise TypeError(msg) + + def __str__(self) -> str_type: + return self.name + + @property + def name(self) -> str_type: + return f"period[{self._freqstr}]" + + @property + def na_value(self) -> NaTType: + return NaT + + def __eq__(self, other: object) -> bool: + if isinstance(other, str): + return other in [self.name, capitalize_first_letter(self.name)] + + return super().__eq__(other) + + def __ne__(self, other: object) -> bool: + return not self.__eq__(other) + + @classmethod + def is_dtype(cls, dtype: object) -> bool: + """ + Return a boolean if we if the passed type is an actual dtype that we + can match (via string or type) + """ + if isinstance(dtype, str): + # PeriodDtype can be instantiated from freq string like "U", + # but doesn't regard freq str like "U" as dtype. + if dtype.startswith(("period[", "Period[")): + try: + return cls._parse_dtype_strict(dtype) is not None + except ValueError: + return False + else: + return False + return super().is_dtype(dtype) + + @classmethod + def construct_array_type(cls) -> type_t[PeriodArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays import PeriodArray + + return PeriodArray + + def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray: + """ + Construct PeriodArray from pyarrow Array/ChunkedArray. 
+ """ + import pyarrow + + from pandas.core.arrays import PeriodArray + from pandas.core.arrays.arrow._arrow_utils import ( + pyarrow_array_to_numpy_and_mask, + ) + + if isinstance(array, pyarrow.Array): + chunks = [array] + else: + chunks = array.chunks + + results = [] + for arr in chunks: + data, mask = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64)) + parr = PeriodArray(data.copy(), dtype=self, copy=False) + # error: Invalid index type "ndarray[Any, dtype[bool_]]" for "PeriodArray"; + # expected type "Union[int, Sequence[int], Sequence[bool], slice]" + parr[~mask] = NaT # type: ignore[index] + results.append(parr) + + if not results: + return PeriodArray(np.array([], dtype="int64"), dtype=self, copy=False) + return PeriodArray._concat_same_type(results) + + @cache_readonly + def index_class(self) -> type_t[PeriodIndex]: + from pandas import PeriodIndex + + return PeriodIndex + + +@register_extension_dtype +class IntervalDtype(PandasExtensionDtype): + """ + An ExtensionDtype for Interval data. + + **This is not an actual numpy dtype**, but a duck type. + + Parameters + ---------- + subtype : str, np.dtype + The dtype of the Interval bounds. + + Attributes + ---------- + subtype + + Methods + ------- + None + + Examples + -------- + >>> pd.IntervalDtype(subtype='int64', closed='both') + interval[int64, both] + """ + + name = "interval" + kind: str_type = "O" + str = "|O08" + base = np.dtype("O") + num = 103 + _metadata = ( + "subtype", + "closed", + ) + + _match = re.compile( + r"(I|i)nterval\[(?P[^,]+(\[.+\])?)" + r"(, (?P(right|left|both|neither)))?\]" + ) + + _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} + _subtype: None | np.dtype + _closed: IntervalClosedType | None + + def __init__(self, subtype=None, closed: IntervalClosedType | None = None) -> None: + from pandas.core.dtypes.common import ( + is_string_dtype, + pandas_dtype, + ) + + if closed is not None and closed not in {"right", "left", "both", "neither"}: + raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'") + + if isinstance(subtype, IntervalDtype): + if closed is not None and closed != subtype.closed: + raise ValueError( + "dtype.closed and 'closed' do not match. " + "Try IntervalDtype(dtype.subtype, closed) instead." 
+ ) + self._subtype = subtype._subtype + self._closed = subtype._closed + elif subtype is None: + # we are called as an empty constructor + # generally for pickle compat + self._subtype = None + self._closed = closed + elif isinstance(subtype, str) and subtype.lower() == "interval": + self._subtype = None + self._closed = closed + else: + if isinstance(subtype, str): + m = IntervalDtype._match.search(subtype) + if m is not None: + gd = m.groupdict() + subtype = gd["subtype"] + if gd.get("closed", None) is not None: + if closed is not None: + if closed != gd["closed"]: + raise ValueError( + "'closed' keyword does not match value " + "specified in dtype string" + ) + closed = gd["closed"] # type: ignore[assignment] + + try: + subtype = pandas_dtype(subtype) + except TypeError as err: + raise TypeError("could not construct IntervalDtype") from err + if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype): + # GH 19016 + msg = ( + "category, object, and string subtypes are not supported " + "for IntervalDtype" + ) + raise TypeError(msg) + self._subtype = subtype + self._closed = closed + + @cache_readonly + def _can_hold_na(self) -> bool: + subtype = self._subtype + if subtype is None: + # partially-initialized + raise NotImplementedError( + "_can_hold_na is not defined for partially-initialized IntervalDtype" + ) + if subtype.kind in "iu": + return False + return True + + @property + def closed(self) -> IntervalClosedType: + return self._closed # type: ignore[return-value] + + @property + def subtype(self): + """ + The dtype of the Interval bounds. + + Examples + -------- + >>> dtype = pd.IntervalDtype(subtype='int64', closed='both') + >>> dtype.subtype + dtype('int64') + """ + return self._subtype + + @classmethod + def construct_array_type(cls) -> type[IntervalArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays import IntervalArray + + return IntervalArray + + @classmethod + def construct_from_string(cls, string: str_type) -> IntervalDtype: + """ + attempt to construct this type from a string, raise a TypeError + if its not possible + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + + if string.lower() == "interval" or cls._match.search(string) is not None: + return cls(string) + + msg = ( + f"Cannot construct a 'IntervalDtype' from '{string}'.\n\n" + "Incorrectly formatted string passed to constructor. " + "Valid formats include Interval or Interval[dtype] " + "where dtype is numeric, datetime, or timedelta" + ) + raise TypeError(msg) + + @property + def type(self) -> type[Interval]: + return Interval + + def __str__(self) -> str_type: + if self.subtype is None: + return "interval" + if self.closed is None: + # Only partially initialized GH#38394 + return f"interval[{self.subtype}]" + return f"interval[{self.subtype}, {self.closed}]" + + def __hash__(self) -> int: + # make myself hashable + return hash(str(self)) + + def __eq__(self, other: object) -> bool: + if isinstance(other, str): + return other.lower() in (self.name.lower(), str(self).lower()) + elif not isinstance(other, IntervalDtype): + return False + elif self.subtype is None or other.subtype is None: + # None should match any subtype + return True + elif self.closed != other.closed: + return False + else: + return self.subtype == other.subtype + + def __setstate__(self, state) -> None: + # for pickle compat. 
__get_state__ is defined in the + # PandasExtensionDtype superclass and uses the public properties to + # pickle -> need to set the settable private ones here (see GH26067) + self._subtype = state["subtype"] + + # backward-compat older pickles won't have "closed" key + self._closed = state.pop("closed", None) + + @classmethod + def is_dtype(cls, dtype: object) -> bool: + """ + Return a boolean if we if the passed type is an actual dtype that we + can match (via string or type) + """ + if isinstance(dtype, str): + if dtype.lower().startswith("interval"): + try: + return cls.construct_from_string(dtype) is not None + except (ValueError, TypeError): + return False + else: + return False + return super().is_dtype(dtype) + + def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray: + """ + Construct IntervalArray from pyarrow Array/ChunkedArray. + """ + import pyarrow + + from pandas.core.arrays import IntervalArray + + if isinstance(array, pyarrow.Array): + chunks = [array] + else: + chunks = array.chunks + + results = [] + for arr in chunks: + if isinstance(arr, pyarrow.ExtensionArray): + arr = arr.storage + left = np.asarray(arr.field("left"), dtype=self.subtype) + right = np.asarray(arr.field("right"), dtype=self.subtype) + iarr = IntervalArray.from_arrays(left, right, closed=self.closed) + results.append(iarr) + + if not results: + return IntervalArray.from_arrays( + np.array([], dtype=self.subtype), + np.array([], dtype=self.subtype), + closed=self.closed, + ) + return IntervalArray._concat_same_type(results) + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + if not all(isinstance(x, IntervalDtype) for x in dtypes): + return None + + closed = cast("IntervalDtype", dtypes[0]).closed + if not all(cast("IntervalDtype", x).closed == closed for x in dtypes): + return np.dtype(object) + + from pandas.core.dtypes.cast import find_common_type + + common = find_common_type([cast("IntervalDtype", x).subtype for x in dtypes]) + if common == object: + return np.dtype(object) + return IntervalDtype(common, closed=closed) + + @cache_readonly + def index_class(self) -> type_t[IntervalIndex]: + from pandas import IntervalIndex + + return IntervalIndex + + +class NumpyEADtype(ExtensionDtype): + """ + A Pandas ExtensionDtype for NumPy dtypes. + + This is mostly for internal compatibility, and is not especially + useful on its own. + + Parameters + ---------- + dtype : object + Object to be converted to a NumPy data type object. + + See Also + -------- + numpy.dtype + """ + + _metadata = ("_dtype",) + _supports_2d = False + _can_fast_transpose = False + + def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None: + if isinstance(dtype, NumpyEADtype): + # make constructor idempotent + dtype = dtype.numpy_dtype + self._dtype = np.dtype(dtype) + + def __repr__(self) -> str: + return f"NumpyEADtype({repr(self.name)})" + + @property + def numpy_dtype(self) -> np.dtype: + """ + The NumPy dtype this NumpyEADtype wraps. + """ + return self._dtype + + @property + def name(self) -> str: + """ + A bit-width name for this data-type. + """ + return self._dtype.name + + @property + def type(self) -> type[np.generic]: + """ + The type object used to instantiate a scalar of this NumPy data-type. + """ + return self._dtype.type + + @property + def _is_numeric(self) -> bool: + # exclude object, str, unicode, void. 
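+        # "biufc" are the NumPy kind codes for boolean, signed integer,
+        # unsigned integer, floating point and complex; any other kind
+        # (object, str, void, datetime, timedelta) is treated as non-numeric.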
+ return self.kind in set("biufc") + + @property + def _is_boolean(self) -> bool: + return self.kind == "b" + + @classmethod + def construct_from_string(cls, string: str) -> NumpyEADtype: + try: + dtype = np.dtype(string) + except TypeError as err: + if not isinstance(string, str): + msg = f"'construct_from_string' expects a string, got {type(string)}" + else: + msg = f"Cannot construct a 'NumpyEADtype' from '{string}'" + raise TypeError(msg) from err + return cls(dtype) + + @classmethod + def construct_array_type(cls) -> type_t[NumpyExtensionArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays import NumpyExtensionArray + + return NumpyExtensionArray + + @property + def kind(self) -> str: + """ + A character code (one of 'biufcmMOSUV') identifying the general kind of data. + """ + return self._dtype.kind + + @property + def itemsize(self) -> int: + """ + The element size of this data-type object. + """ + return self._dtype.itemsize + + +class BaseMaskedDtype(ExtensionDtype): + """ + Base class for dtypes for BaseMaskedArray subclasses. + """ + + base = None + type: type + + @property + def na_value(self) -> libmissing.NAType: + return libmissing.NA + + @cache_readonly + def numpy_dtype(self) -> np.dtype: + """Return an instance of our numpy dtype""" + return np.dtype(self.type) + + @cache_readonly + def kind(self) -> str: + return self.numpy_dtype.kind + + @cache_readonly + def itemsize(self) -> int: + """Return the number of bytes in this dtype""" + return self.numpy_dtype.itemsize + + @classmethod + def construct_array_type(cls) -> type_t[BaseMaskedArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + raise NotImplementedError + + @classmethod + def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype: + """ + Construct the MaskedDtype corresponding to the given numpy dtype. + """ + if dtype.kind == "b": + from pandas.core.arrays.boolean import BooleanDtype + + return BooleanDtype() + elif dtype.kind in "iu": + from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE + + return NUMPY_INT_TO_DTYPE[dtype] + elif dtype.kind == "f": + from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE + + return NUMPY_FLOAT_TO_DTYPE[dtype] + else: + raise NotImplementedError(dtype) + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + # We unwrap any masked dtypes, find the common dtype we would use + # for that, then re-mask the result. + from pandas.core.dtypes.cast import find_common_type + + new_dtype = find_common_type( + [ + dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype + for dtype in dtypes + ] + ) + if not isinstance(new_dtype, np.dtype): + # If we ever support e.g. Masked[DatetimeArray] then this will change + return None + try: + return type(self).from_numpy_dtype(new_dtype) + except (KeyError, NotImplementedError): + return None + + +@register_extension_dtype +class SparseDtype(ExtensionDtype): + """ + Dtype for data stored in :class:`SparseArray`. + + This dtype implements the pandas ExtensionDtype interface. + + Parameters + ---------- + dtype : str, ExtensionDtype, numpy.dtype, type, default numpy.float64 + The dtype of the underlying array storing the non-fill value values. + fill_value : scalar, optional + The scalar value not stored in the SparseArray. By default, this + depends on `dtype`. 
+ + =========== ========== + dtype na_value + =========== ========== + float ``np.nan`` + int ``0`` + bool ``False`` + datetime64 ``pd.NaT`` + timedelta64 ``pd.NaT`` + =========== ========== + + The default value may be overridden by specifying a `fill_value`. + + Attributes + ---------- + None + + Methods + ------- + None + + Examples + -------- + >>> ser = pd.Series([1, 0, 0], dtype=pd.SparseDtype(dtype=int, fill_value=0)) + >>> ser + 0 1 + 1 0 + 2 0 + dtype: Sparse[int64, 0] + >>> ser.sparse.density + 0.3333333333333333 + """ + + _is_immutable = True + + # We include `_is_na_fill_value` in the metadata to avoid hash collisions + # between SparseDtype(float, 0.0) and SparseDtype(float, nan). + # Without is_na_fill_value in the comparison, those would be equal since + # hash(nan) is (sometimes?) 0. + _metadata = ("_dtype", "_fill_value", "_is_na_fill_value") + + def __init__(self, dtype: Dtype = np.float64, fill_value: Any = None) -> None: + if isinstance(dtype, type(self)): + if fill_value is None: + fill_value = dtype.fill_value + dtype = dtype.subtype + + from pandas.core.dtypes.common import ( + is_string_dtype, + pandas_dtype, + ) + from pandas.core.dtypes.missing import na_value_for_dtype + + dtype = pandas_dtype(dtype) + if is_string_dtype(dtype): + dtype = np.dtype("object") + if not isinstance(dtype, np.dtype): + # GH#53160 + raise TypeError("SparseDtype subtype must be a numpy dtype") + + if fill_value is None: + fill_value = na_value_for_dtype(dtype) + + self._dtype = dtype + self._fill_value = fill_value + self._check_fill_value() + + def __hash__(self) -> int: + # Python3 doesn't inherit __hash__ when a base class overrides + # __eq__, so we explicitly do it here. + return super().__hash__() + + def __eq__(self, other: object) -> bool: + # We have to override __eq__ to handle NA values in _metadata. + # The base class does simple == checks, which fail for NA. + if isinstance(other, str): + try: + other = self.construct_from_string(other) + except TypeError: + return False + + if isinstance(other, type(self)): + subtype = self.subtype == other.subtype + if self._is_na_fill_value: + # this case is complicated by two things: + # SparseDtype(float, float(nan)) == SparseDtype(float, np.nan) + # SparseDtype(float, np.nan) != SparseDtype(float, pd.NaT) + # i.e. we want to treat any floating-point NaN as equal, but + # not a floating-point NaN and a datetime NaT. + fill_value = ( + other._is_na_fill_value + and isinstance(self.fill_value, type(other.fill_value)) + or isinstance(other.fill_value, type(self.fill_value)) + ) + else: + with warnings.catch_warnings(): + # Ignore spurious numpy warning + warnings.filterwarnings( + "ignore", + "elementwise comparison failed", + category=DeprecationWarning, + ) + + fill_value = self.fill_value == other.fill_value + + return subtype and fill_value + return False + + @property + def fill_value(self): + """ + The fill value of the array. + + Converting the SparseArray to a dense ndarray will fill the + array with this value. + + .. warning:: + + It's possible to end up with a SparseArray that has ``fill_value`` + values in ``sp_values``. This can occur, for example, when setting + ``SparseArray.fill_value`` directly. + """ + return self._fill_value + + def _check_fill_value(self) -> None: + if not lib.is_scalar(self._fill_value): + raise ValueError( + f"fill_value must be a scalar. 
Got {self._fill_value} instead" + ) + + from pandas.core.dtypes.cast import can_hold_element + from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + ) + + from pandas.core.construction import ensure_wrapped_if_datetimelike + + # GH#23124 require fill_value and subtype to match + val = self._fill_value + if isna(val): + if not is_valid_na_for_dtype(val, self.subtype): + warnings.warn( + "Allowing arbitrary scalar fill_value in SparseDtype is " + "deprecated. In a future version, the fill_value must be " + "a valid value for the SparseDtype.subtype.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + dummy = np.empty(0, dtype=self.subtype) + dummy = ensure_wrapped_if_datetimelike(dummy) + + if not can_hold_element(dummy, val): + warnings.warn( + "Allowing arbitrary scalar fill_value in SparseDtype is " + "deprecated. In a future version, the fill_value must be " + "a valid value for the SparseDtype.subtype.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + @property + def _is_na_fill_value(self) -> bool: + from pandas import isna + + return isna(self.fill_value) + + @property + def _is_numeric(self) -> bool: + return not self.subtype == object + + @property + def _is_boolean(self) -> bool: + return self.subtype.kind == "b" + + @property + def kind(self) -> str: + """ + The sparse kind. Either 'integer', or 'block'. + """ + return self.subtype.kind + + @property + def type(self): + return self.subtype.type + + @property + def subtype(self): + return self._dtype + + @property + def name(self) -> str: + return f"Sparse[{self.subtype.name}, {repr(self.fill_value)}]" + + def __repr__(self) -> str: + return self.name + + @classmethod + def construct_array_type(cls) -> type_t[SparseArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays.sparse.array import SparseArray + + return SparseArray + + @classmethod + def construct_from_string(cls, string: str) -> SparseDtype: + """ + Construct a SparseDtype from a string form. + + Parameters + ---------- + string : str + Can take the following forms. + + string dtype + ================ ============================ + 'int' SparseDtype[np.int64, 0] + 'Sparse' SparseDtype[np.float64, nan] + 'Sparse[int]' SparseDtype[np.int64, 0] + 'Sparse[int, 0]' SparseDtype[np.int64, 0] + ================ ============================ + + It is not possible to specify non-default fill values + with a string. An argument like ``'Sparse[int, 1]'`` + will raise a ``TypeError`` because the default fill value + for integers is 0. + + Returns + ------- + SparseDtype + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + msg = f"Cannot construct a 'SparseDtype' from '{string}'" + if string.startswith("Sparse"): + try: + sub_type, has_fill_value = cls._parse_subtype(string) + except ValueError as err: + raise TypeError(msg) from err + else: + result = SparseDtype(sub_type) + msg = ( + f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt " + "looks like the fill_value in the string is not " + "the default for the dtype. Non-default fill_values " + "are not supported. Use the 'SparseDtype()' " + "constructor instead." 
+ ) + if has_fill_value and str(result) != string: + raise TypeError(msg) + return result + else: + raise TypeError(msg) + + @staticmethod + def _parse_subtype(dtype: str) -> tuple[str, bool]: + """ + Parse a string to get the subtype + + Parameters + ---------- + dtype : str + A string like + + * Sparse[subtype] + * Sparse[subtype, fill_value] + + Returns + ------- + subtype : str + + Raises + ------ + ValueError + When the subtype cannot be extracted. + """ + xpr = re.compile(r"Sparse\[(?P[^,]*)(, )?(?P.*?)?\]$") + m = xpr.match(dtype) + has_fill_value = False + if m: + subtype = m.groupdict()["subtype"] + has_fill_value = bool(m.groupdict()["fill_value"]) + elif dtype == "Sparse": + subtype = "float64" + else: + raise ValueError(f"Cannot parse {dtype}") + return subtype, has_fill_value + + @classmethod + def is_dtype(cls, dtype: object) -> bool: + dtype = getattr(dtype, "dtype", dtype) + if isinstance(dtype, str) and dtype.startswith("Sparse"): + sub_type, _ = cls._parse_subtype(dtype) + dtype = np.dtype(sub_type) + elif isinstance(dtype, cls): + return True + return isinstance(dtype, np.dtype) or dtype == "Sparse" + + def update_dtype(self, dtype) -> SparseDtype: + """ + Convert the SparseDtype to a new dtype. + + This takes care of converting the ``fill_value``. + + Parameters + ---------- + dtype : Union[str, numpy.dtype, SparseDtype] + The new dtype to use. + + * For a SparseDtype, it is simply returned + * For a NumPy dtype (or str), the current fill value + is converted to the new dtype, and a SparseDtype + with `dtype` and the new fill value is returned. + + Returns + ------- + SparseDtype + A new SparseDtype with the correct `dtype` and fill value + for that `dtype`. + + Raises + ------ + ValueError + When the current fill value cannot be converted to the + new `dtype` (e.g. trying to convert ``np.nan`` to an + integer dtype). + + + Examples + -------- + >>> SparseDtype(int, 0).update_dtype(float) + Sparse[float64, 0.0] + + >>> SparseDtype(int, 1).update_dtype(SparseDtype(float, np.nan)) + Sparse[float64, nan] + """ + from pandas.core.dtypes.astype import astype_array + from pandas.core.dtypes.common import pandas_dtype + + cls = type(self) + dtype = pandas_dtype(dtype) + + if not isinstance(dtype, cls): + if not isinstance(dtype, np.dtype): + raise TypeError("sparse arrays of extension dtypes not supported") + + fv_asarray = np.atleast_1d(np.array(self.fill_value)) + fvarr = astype_array(fv_asarray, dtype) + # NB: not fv_0d.item(), as that casts dt64->int + fill_value = fvarr[0] + dtype = cls(dtype, fill_value=fill_value) + + return dtype + + @property + def _subtype_with_str(self): + """ + Whether the SparseDtype's subtype should be considered ``str``. + + Typically, pandas will store string data in an object-dtype array. + When converting values to a dtype, e.g. in ``.astype``, we need to + be more specific, we need the actual underlying type. 
+ + Returns + ------- + >>> SparseDtype(int, 1)._subtype_with_str + dtype('int64') + + >>> SparseDtype(object, 1)._subtype_with_str + dtype('O') + + >>> dtype = SparseDtype(str, '') + >>> dtype.subtype + dtype('O') + + >>> dtype._subtype_with_str + + """ + if isinstance(self.fill_value, str): + return type(self.fill_value) + return self.subtype + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + # TODO for now only handle SparseDtypes and numpy dtypes => extend + # with other compatible extension dtypes + from pandas.core.dtypes.cast import np_find_common_type + + if any( + isinstance(x, ExtensionDtype) and not isinstance(x, SparseDtype) + for x in dtypes + ): + return None + + fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)] + fill_value = fill_values[0] + + from pandas import isna + + # np.nan isn't a singleton, so we may end up with multiple + # NaNs here, so we ignore the all NA case too. + if not (len(set(fill_values)) == 1 or isna(fill_values).all()): + warnings.warn( + "Concatenating sparse arrays with multiple fill " + f"values: '{fill_values}'. Picking the first and " + "converting the rest.", + PerformanceWarning, + stacklevel=find_stack_level(), + ) + + np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes) + return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value) + + +@register_extension_dtype +class ArrowDtype(StorageExtensionDtype): + """ + An ExtensionDtype for PyArrow data types. + + .. warning:: + + ArrowDtype is considered experimental. The implementation and + parts of the API may change without warning. + + While most ``dtype`` arguments can accept the "string" + constructor, e.g. ``"int64[pyarrow]"``, ArrowDtype is useful + if the data type contains parameters like ``pyarrow.timestamp``. + + Parameters + ---------- + pyarrow_dtype : pa.DataType + An instance of a `pyarrow.DataType `__. + + Attributes + ---------- + pyarrow_dtype + + Methods + ------- + None + + Returns + ------- + ArrowDtype + + Examples + -------- + >>> import pyarrow as pa + >>> pd.ArrowDtype(pa.int64()) + int64[pyarrow] + + Types with parameters must be constructed with ArrowDtype. + + >>> pd.ArrowDtype(pa.timestamp("s", tz="America/New_York")) + timestamp[s, tz=America/New_York][pyarrow] + >>> pd.ArrowDtype(pa.list_(pa.int64())) + list[pyarrow] + """ + + _metadata = ("storage", "pyarrow_dtype") # type: ignore[assignment] + + def __init__(self, pyarrow_dtype: pa.DataType) -> None: + super().__init__("pyarrow") + if pa_version_under10p1: + raise ImportError("pyarrow>=10.0.1 is required for ArrowDtype") + if not isinstance(pyarrow_dtype, pa.DataType): + raise ValueError( + f"pyarrow_dtype ({pyarrow_dtype}) must be an instance " + f"of a pyarrow.DataType. Got {type(pyarrow_dtype)} instead." + ) + self.pyarrow_dtype = pyarrow_dtype + + def __repr__(self) -> str: + return self.name + + def __hash__(self) -> int: + # make myself hashable + return hash(str(self)) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, type(self)): + return super().__eq__(other) + return self.pyarrow_dtype == other.pyarrow_dtype + + @property + def type(self): + """ + Returns associated scalar type. 
+ """ + pa_type = self.pyarrow_dtype + if pa.types.is_integer(pa_type): + return int + elif pa.types.is_floating(pa_type): + return float + elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type): + return str + elif ( + pa.types.is_binary(pa_type) + or pa.types.is_fixed_size_binary(pa_type) + or pa.types.is_large_binary(pa_type) + ): + return bytes + elif pa.types.is_boolean(pa_type): + return bool + elif pa.types.is_duration(pa_type): + if pa_type.unit == "ns": + return Timedelta + else: + return timedelta + elif pa.types.is_timestamp(pa_type): + if pa_type.unit == "ns": + return Timestamp + else: + return datetime + elif pa.types.is_date(pa_type): + return date + elif pa.types.is_time(pa_type): + return time + elif pa.types.is_decimal(pa_type): + return Decimal + elif pa.types.is_dictionary(pa_type): + # TODO: Potentially change this & CategoricalDtype.type to + # something more representative of the scalar + return CategoricalDtypeType + elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type): + return list + elif pa.types.is_fixed_size_list(pa_type): + return list + elif pa.types.is_map(pa_type): + return list + elif pa.types.is_struct(pa_type): + return dict + elif pa.types.is_null(pa_type): + # TODO: None? pd.NA? pa.null? + return type(pa_type) + elif isinstance(pa_type, pa.ExtensionType): + return type(self)(pa_type.storage_type).type + raise NotImplementedError(pa_type) + + @property + def name(self) -> str: # type: ignore[override] + """ + A string identifying the data type. + """ + return f"{str(self.pyarrow_dtype)}[{self.storage}]" + + @cache_readonly + def numpy_dtype(self) -> np.dtype: + """Return an instance of the related numpy dtype""" + if pa.types.is_timestamp(self.pyarrow_dtype): + # pa.timestamp(unit).to_pandas_dtype() returns ns units + # regardless of the pyarrow timestamp units. + # This can be removed if/when pyarrow addresses it: + # https://github.com/apache/arrow/issues/34462 + return np.dtype(f"datetime64[{self.pyarrow_dtype.unit}]") + if pa.types.is_duration(self.pyarrow_dtype): + # pa.duration(unit).to_pandas_dtype() returns ns units + # regardless of the pyarrow duration units + # This can be removed if/when pyarrow addresses it: + # https://github.com/apache/arrow/issues/34462 + return np.dtype(f"timedelta64[{self.pyarrow_dtype.unit}]") + if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string( + self.pyarrow_dtype + ): + # pa.string().to_pandas_dtype() = object which we don't want + return np.dtype(str) + try: + return np.dtype(self.pyarrow_dtype.to_pandas_dtype()) + except (NotImplementedError, TypeError): + return np.dtype(object) + + @cache_readonly + def kind(self) -> str: + if pa.types.is_timestamp(self.pyarrow_dtype): + # To mirror DatetimeTZDtype + return "M" + return self.numpy_dtype.kind + + @cache_readonly + def itemsize(self) -> int: + """Return the number of bytes in this dtype""" + return self.numpy_dtype.itemsize + + @classmethod + def construct_array_type(cls) -> type_t[ArrowExtensionArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + from pandas.core.arrays.arrow import ArrowExtensionArray + + return ArrowExtensionArray + + @classmethod + def construct_from_string(cls, string: str) -> ArrowDtype: + """ + Construct this type from a string. + + Parameters + ---------- + string : str + string should follow the format f"{pyarrow_type}[pyarrow]" + e.g. 
int64[pyarrow] + """ + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + if not string.endswith("[pyarrow]"): + raise TypeError(f"'{string}' must end with '[pyarrow]'") + if string == "string[pyarrow]": + # Ensure Registry.find skips ArrowDtype to use StringDtype instead + raise TypeError("string[pyarrow] should be constructed by StringDtype") + + base_type = string[:-9] # get rid of "[pyarrow]" + try: + pa_dtype = pa.type_for_alias(base_type) + except ValueError as err: + has_parameters = re.search(r"[\[\(].*[\]\)]", base_type) + if has_parameters: + # Fallback to try common temporal types + try: + return cls._parse_temporal_dtype_string(base_type) + except (NotImplementedError, ValueError): + # Fall through to raise with nice exception message below + pass + + raise NotImplementedError( + "Passing pyarrow type specific parameters " + f"({has_parameters.group()}) in the string is not supported. " + "Please construct an ArrowDtype object with a pyarrow_dtype " + "instance with specific parameters." + ) from err + raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err + return cls(pa_dtype) + + # TODO(arrow#33642): This can be removed once supported by pyarrow + @classmethod + def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype: + """ + Construct a temporal ArrowDtype from string. + """ + # we assume + # 1) "[pyarrow]" has already been stripped from the end of our string. + # 2) we know "[" is present + head, tail = string.split("[", 1) + + if not tail.endswith("]"): + raise ValueError + tail = tail[:-1] + + if head == "timestamp": + assert "," in tail # otherwise type_for_alias should work + unit, tz = tail.split(",", 1) + unit = unit.strip() + tz = tz.strip() + if tz.startswith("tz="): + tz = tz[3:] + + pa_type = pa.timestamp(unit, tz=tz) + dtype = cls(pa_type) + return dtype + + raise NotImplementedError(string) + + @property + def _is_numeric(self) -> bool: + """ + Whether columns with this dtype should be considered numeric. + """ + # TODO: pa.types.is_boolean? + return ( + pa.types.is_integer(self.pyarrow_dtype) + or pa.types.is_floating(self.pyarrow_dtype) + or pa.types.is_decimal(self.pyarrow_dtype) + ) + + @property + def _is_boolean(self) -> bool: + """ + Whether this dtype should be considered boolean. + """ + return pa.types.is_boolean(self.pyarrow_dtype) + + def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: + # We unwrap any masked dtypes, find the common dtype we would use + # for that, then re-mask the result. + # Mirrors BaseMaskedDtype + from pandas.core.dtypes.cast import find_common_type + + null_dtype = type(self)(pa.null()) + + new_dtype = find_common_type( + [ + dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype + for dtype in dtypes + if dtype != null_dtype + ] + ) + if not isinstance(new_dtype, np.dtype): + return None + try: + pa_dtype = pa.from_numpy_dtype(new_dtype) + return type(self)(pa_dtype) + except NotImplementedError: + return None + + def __from_arrow__(self, array: pa.Array | pa.ChunkedArray): + """ + Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray. 
+ """ + array_class = self.construct_array_type() + arr = array.cast(self.pyarrow_dtype, safe=True) + return array_class(arr) diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/generic.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..9718ad600cb80b6e38f069a83aaf35ddb376fb00 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/generic.py @@ -0,0 +1,147 @@ +""" define generic base classes for pandas objects """ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Type, + cast, +) + +if TYPE_CHECKING: + from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, + ) + from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + NumpyExtensionArray, + PeriodArray, + TimedeltaArray, + ) + from pandas.core.generic import NDFrame + + +# define abstract base classes to enable isinstance type checking on our +# objects +def create_pandas_abc_type(name, attr, comp): + def _check(inst) -> bool: + return getattr(inst, attr, "_typ") in comp + + # https://github.com/python/mypy/issues/1006 + # error: 'classmethod' used with a non-method + @classmethod # type: ignore[misc] + def _instancecheck(cls, inst) -> bool: + return _check(inst) and not isinstance(inst, type) + + @classmethod # type: ignore[misc] + def _subclasscheck(cls, inst) -> bool: + # Raise instead of returning False + # This is consistent with default __subclasscheck__ behavior + if not isinstance(inst, type): + raise TypeError("issubclass() arg 1 must be a class") + + return _check(inst) + + dct = {"__instancecheck__": _instancecheck, "__subclasscheck__": _subclasscheck} + meta = type("ABCBase", (type,), dct) + return meta(name, (), dct) + + +ABCRangeIndex = cast( + "Type[RangeIndex]", + create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",)), +) +ABCMultiIndex = cast( + "Type[MultiIndex]", + create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)), +) +ABCDatetimeIndex = cast( + "Type[DatetimeIndex]", + create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)), +) +ABCTimedeltaIndex = cast( + "Type[TimedeltaIndex]", + create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)), +) +ABCPeriodIndex = cast( + "Type[PeriodIndex]", + create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)), +) +ABCCategoricalIndex = cast( + "Type[CategoricalIndex]", + create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",)), +) +ABCIntervalIndex = cast( + "Type[IntervalIndex]", + create_pandas_abc_type("ABCIntervalIndex", "_typ", ("intervalindex",)), +) +ABCIndex = cast( + "Type[Index]", + create_pandas_abc_type( + "ABCIndex", + "_typ", + { + "index", + "rangeindex", + "multiindex", + "datetimeindex", + "timedeltaindex", + "periodindex", + "categoricalindex", + "intervalindex", + }, + ), +) + + +ABCNDFrame = cast( + "Type[NDFrame]", + create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe")), +) +ABCSeries = cast( + "Type[Series]", + create_pandas_abc_type("ABCSeries", "_typ", ("series",)), +) +ABCDataFrame = cast( + "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) +) + +ABCCategorical = cast( + "Type[Categorical]", + create_pandas_abc_type("ABCCategorical", "_typ", ("categorical")), +) +ABCDatetimeArray = cast( + "Type[DatetimeArray]", + 
create_pandas_abc_type("ABCDatetimeArray", "_typ", ("datetimearray")), +) +ABCTimedeltaArray = cast( + "Type[TimedeltaArray]", + create_pandas_abc_type("ABCTimedeltaArray", "_typ", ("timedeltaarray")), +) +ABCPeriodArray = cast( + "Type[PeriodArray]", + create_pandas_abc_type("ABCPeriodArray", "_typ", ("periodarray",)), +) +ABCExtensionArray = cast( + "Type[ExtensionArray]", + create_pandas_abc_type( + "ABCExtensionArray", + "_typ", + # Note: IntervalArray and SparseArray are included bc they have _typ="extension" + {"extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"}, + ), +) +ABCNumpyExtensionArray = cast( + "Type[NumpyExtensionArray]", + create_pandas_abc_type("ABCNumpyExtensionArray", "_typ", ("npy_extension",)), +) diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/inference.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..f551716772f61455a330bdb308cee830bd54fb03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/inference.py @@ -0,0 +1,437 @@ +""" basic inference routines """ + +from __future__ import annotations + +from collections import abc +from numbers import Number +import re +from re import Pattern +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs import lib + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import TypeGuard + +is_bool = lib.is_bool + +is_integer = lib.is_integer + +is_float = lib.is_float + +is_complex = lib.is_complex + +is_scalar = lib.is_scalar + +is_decimal = lib.is_decimal + +is_interval = lib.is_interval + +is_list_like = lib.is_list_like + +is_iterator = lib.is_iterator + + +def is_number(obj) -> TypeGuard[Number | np.number]: + """ + Check if the object is a number. + + Returns True when the object is a number, and False if is not. + + Parameters + ---------- + obj : any type + The object to check if is a number. + + Returns + ------- + bool + Whether `obj` is a number or not. + + See Also + -------- + api.types.is_integer: Checks a subgroup of numbers. + + Examples + -------- + >>> from pandas.api.types import is_number + >>> is_number(1) + True + >>> is_number(7.15) + True + + Booleans are valid because they are int subclass. + + >>> is_number(False) + True + + >>> is_number("foo") + False + >>> is_number("5") + False + """ + return isinstance(obj, (Number, np.number)) + + +def iterable_not_string(obj) -> bool: + """ + Check if the object is an iterable but not a string. + + Parameters + ---------- + obj : The object to check. + + Returns + ------- + is_iter_not_string : bool + Whether `obj` is a non-string iterable. + + Examples + -------- + >>> iterable_not_string([1, 2, 3]) + True + >>> iterable_not_string("foo") + False + >>> iterable_not_string(1) + False + """ + return isinstance(obj, abc.Iterable) and not isinstance(obj, str) + + +def is_file_like(obj) -> bool: + """ + Check if the object is a file-like object. + + For objects to be considered file-like, they must + be an iterator AND have either a `read` and/or `write` + method as an attribute. + + Note: file-like objects must be iterable, but + iterable objects need not be file-like. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` has file-like properties. 
+ + Examples + -------- + >>> import io + >>> from pandas.api.types import is_file_like + >>> buffer = io.StringIO("data") + >>> is_file_like(buffer) + True + >>> is_file_like([1, 2, 3]) + False + """ + if not (hasattr(obj, "read") or hasattr(obj, "write")): + return False + + return bool(hasattr(obj, "__iter__")) + + +def is_re(obj) -> TypeGuard[Pattern]: + """ + Check if the object is a regex pattern instance. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` is a regex pattern. + + Examples + -------- + >>> from pandas.api.types import is_re + >>> import re + >>> is_re(re.compile(".*")) + True + >>> is_re("foo") + False + """ + return isinstance(obj, Pattern) + + +def is_re_compilable(obj) -> bool: + """ + Check if the object can be compiled into a regex pattern instance. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` can be compiled as a regex pattern. + + Examples + -------- + >>> from pandas.api.types import is_re_compilable + >>> is_re_compilable(".*") + True + >>> is_re_compilable(1) + False + """ + try: + re.compile(obj) + except TypeError: + return False + else: + return True + + +def is_array_like(obj) -> bool: + """ + Check if the object is array-like. + + For an object to be considered array-like, it must be list-like and + have a `dtype` attribute. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + is_array_like : bool + Whether `obj` has array-like properties. + + Examples + -------- + >>> is_array_like(np.array([1, 2, 3])) + True + >>> is_array_like(pd.Series(["a", "b"])) + True + >>> is_array_like(pd.Index(["2016-01-01"])) + True + >>> is_array_like([1, 2, 3]) + False + >>> is_array_like(("a", "b")) + False + """ + return is_list_like(obj) and hasattr(obj, "dtype") + + +def is_nested_list_like(obj) -> bool: + """ + Check if the object is list-like, and that all of its elements + are also list-like. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + is_list_like : bool + Whether `obj` has list-like properties. + + Examples + -------- + >>> is_nested_list_like([[1, 2, 3]]) + True + >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) + True + >>> is_nested_list_like(["foo"]) + False + >>> is_nested_list_like([]) + False + >>> is_nested_list_like([[1, 2, 3], 1]) + False + + Notes + ----- + This won't reliably detect whether a consumable iterator (e. g. + a generator) is a nested-list-like without consuming the iterator. + To avoid consuming it, we always return False if the outer container + doesn't define `__len__`. + + See Also + -------- + is_list_like + """ + return ( + is_list_like(obj) + and hasattr(obj, "__len__") + and len(obj) > 0 + and all(is_list_like(item) for item in obj) + ) + + +def is_dict_like(obj) -> bool: + """ + Check if the object is dict-like. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` has dict-like properties. + + Examples + -------- + >>> from pandas.api.types import is_dict_like + >>> is_dict_like({1: 2}) + True + >>> is_dict_like([1, 2, 3]) + False + >>> is_dict_like(dict) + False + >>> is_dict_like(dict()) + True + """ + dict_like_attrs = ("__getitem__", "keys", "__contains__") + return ( + all(hasattr(obj, attr) for attr in dict_like_attrs) + # [GH 25196] exclude classes + and not isinstance(obj, type) + ) + + +def is_named_tuple(obj) -> bool: + """ + Check if the object is a named tuple. 
+ + Parameters + ---------- + obj : The object to check + + Returns + ------- + bool + Whether `obj` is a named tuple. + + Examples + -------- + >>> from collections import namedtuple + >>> from pandas.api.types import is_named_tuple + >>> Point = namedtuple("Point", ["x", "y"]) + >>> p = Point(1, 2) + >>> + >>> is_named_tuple(p) + True + >>> is_named_tuple((1, 2)) + False + """ + return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields") + + +def is_hashable(obj) -> TypeGuard[Hashable]: + """ + Return True if hash(obj) will succeed, False otherwise. + + Some types will pass a test against collections.abc.Hashable but fail when + they are actually hashed with hash(). + + Distinguish between these and other types by trying the call to hash() and + seeing if they raise TypeError. + + Returns + ------- + bool + + Examples + -------- + >>> import collections + >>> from pandas.api.types import is_hashable + >>> a = ([],) + >>> isinstance(a, collections.abc.Hashable) + True + >>> is_hashable(a) + False + """ + # Unfortunately, we can't use isinstance(obj, collections.abc.Hashable), + # which can be faster than calling hash. That is because numpy scalars + # fail this test. + + # Reconsider this decision once this numpy bug is fixed: + # https://github.com/numpy/numpy/issues/5562 + + try: + hash(obj) + except TypeError: + return False + else: + return True + + +def is_sequence(obj) -> bool: + """ + Check if the object is a sequence of objects. + String types are not included as sequences here. + + Parameters + ---------- + obj : The object to check + + Returns + ------- + is_sequence : bool + Whether `obj` is a sequence of objects. + + Examples + -------- + >>> l = [1, 2, 3] + >>> + >>> is_sequence(l) + True + >>> is_sequence(iter(l)) + False + """ + try: + iter(obj) # Can iterate over it. + len(obj) # Has a length associated with it. + return not isinstance(obj, (str, bytes)) + except (TypeError, AttributeError): + return False + + +def is_dataclass(item) -> bool: + """ + Checks if the object is a data-class instance + + Parameters + ---------- + item : object + + Returns + -------- + is_dataclass : bool + True if the item is an instance of a data-class, + will return false if you pass the data class itself + + Examples + -------- + >>> from dataclasses import dataclass + >>> @dataclass + ... class Point: + ... x: int + ... 
y: int + + >>> is_dataclass(Point) + False + >>> is_dataclass(Point(0,2)) + True + + """ + try: + import dataclasses + + return dataclasses.is_dataclass(item) and not isinstance(item, type) + except ImportError: + return False diff --git a/venv/lib/python3.10/site-packages/pandas/core/dtypes/missing.py b/venv/lib/python3.10/site-packages/pandas/core/dtypes/missing.py new file mode 100644 index 0000000000000000000000000000000000000000..c341ff9dff7e613d8db2209efb5c10f170a9cd47 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/dtypes/missing.py @@ -0,0 +1,810 @@ +""" +missing types & inference +""" +from __future__ import annotations + +from decimal import Decimal +from functools import partial +from typing import ( + TYPE_CHECKING, + overload, +) +import warnings + +import numpy as np + +from pandas._config import get_option + +from pandas._libs import lib +import pandas._libs.missing as libmissing +from pandas._libs.tslibs import ( + NaT, + iNaT, +) + +from pandas.core.dtypes.common import ( + DT64NS_DTYPE, + TD64NS_DTYPE, + ensure_object, + is_scalar, + is_string_or_object_np_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCExtensionArray, + ABCIndex, + ABCMultiIndex, + ABCSeries, +) +from pandas.core.dtypes.inference import is_list_like + +if TYPE_CHECKING: + from re import Pattern + + from pandas._typing import ( + ArrayLike, + DtypeObj, + NDFrame, + NDFrameT, + Scalar, + npt, + ) + + from pandas import Series + from pandas.core.indexes.base import Index + + +isposinf_scalar = libmissing.isposinf_scalar +isneginf_scalar = libmissing.isneginf_scalar + +nan_checker = np.isnan +INF_AS_NA = False +_dtype_object = np.dtype("object") +_dtype_str = np.dtype(str) + + +@overload +def isna(obj: Scalar | Pattern) -> bool: + ... + + +@overload +def isna( + obj: ArrayLike | Index | list, +) -> npt.NDArray[np.bool_]: + ... + + +@overload +def isna(obj: NDFrameT) -> NDFrameT: + ... + + +# handle unions +@overload +def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: + ... + + +@overload +def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + ... + + +def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + """ + Detect missing values for an array-like object. + + This function takes a scalar or array-like object and indicates + whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` + in object arrays, ``NaT`` in datetimelike). + + Parameters + ---------- + obj : scalar or array-like + Object to check for null or missing values. + + Returns + ------- + bool or array-like of bool + For scalar input, returns a scalar boolean. + For array input, returns an array of boolean indicating whether each + corresponding element is missing. + + See Also + -------- + notna : Boolean inverse of pandas.isna. + Series.isna : Detect missing values in a Series. + DataFrame.isna : Detect missing values in a DataFrame. + Index.isna : Detect missing values in an Index. + + Examples + -------- + Scalar arguments (including strings) result in a scalar boolean. + + >>> pd.isna('dog') + False + + >>> pd.isna(pd.NA) + True + + >>> pd.isna(np.nan) + True + + ndarrays result in an ndarray of booleans. 
+ + >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) + >>> array + array([[ 1., nan, 3.], + [ 4., 5., nan]]) + >>> pd.isna(array) + array([[False, True, False], + [False, False, True]]) + + For indexes, an ndarray of booleans is returned. + + >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, + ... "2017-07-08"]) + >>> index + DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], + dtype='datetime64[ns]', freq=None) + >>> pd.isna(index) + array([False, False, True, False]) + + For Series and DataFrame, the same type is returned, containing booleans. + + >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) + >>> df + 0 1 2 + 0 ant bee cat + 1 dog None fly + >>> pd.isna(df) + 0 1 2 + 0 False False False + 1 False True False + + >>> pd.isna(df[1]) + 0 False + 1 True + Name: 1, dtype: bool + """ + return _isna(obj) + + +isnull = isna + + +def _isna(obj, inf_as_na: bool = False): + """ + Detect missing values, treating None, NaN or NA as null. Infinite + values will also be treated as null if inf_as_na is True. + + Parameters + ---------- + obj: ndarray or object value + Input array or scalar value. + inf_as_na: bool + Whether to treat infinity as null. + + Returns + ------- + boolean ndarray or boolean + """ + if is_scalar(obj): + return libmissing.checknull(obj, inf_as_na=inf_as_na) + elif isinstance(obj, ABCMultiIndex): + raise NotImplementedError("isna is not defined for MultiIndex") + elif isinstance(obj, type): + return False + elif isinstance(obj, (np.ndarray, ABCExtensionArray)): + return _isna_array(obj, inf_as_na=inf_as_na) + elif isinstance(obj, ABCIndex): + # Try to use cached isna, which also short-circuits for integer dtypes + # and avoids materializing RangeIndex._values + if not obj._can_hold_na: + return obj.isna() + return _isna_array(obj._values, inf_as_na=inf_as_na) + + elif isinstance(obj, ABCSeries): + result = _isna_array(obj._values, inf_as_na=inf_as_na) + # box + result = obj._constructor(result, index=obj.index, name=obj.name, copy=False) + return result + elif isinstance(obj, ABCDataFrame): + return obj.isna() + elif isinstance(obj, list): + return _isna_array(np.asarray(obj, dtype=object), inf_as_na=inf_as_na) + elif hasattr(obj, "__array__"): + return _isna_array(np.asarray(obj), inf_as_na=inf_as_na) + else: + return False + + +def _use_inf_as_na(key) -> None: + """ + Option change callback for na/inf behaviour. + + Choose which replacement for numpy.isnan / -numpy.isfinite is used. + + Parameters + ---------- + flag: bool + True means treat None, NaN, INF, -INF as null (old way), + False means None and NaN are null, but INF, -INF are not null + (new way). + + Notes + ----- + This approach to setting global module values is discussed and + approved here: + + * https://stackoverflow.com/questions/4859217/ + programmatically-creating-variables-in-python/4859312#4859312 + """ + inf_as_na = get_option(key) + globals()["_isna"] = partial(_isna, inf_as_na=inf_as_na) + if inf_as_na: + globals()["nan_checker"] = lambda x: ~np.isfinite(x) + globals()["INF_AS_NA"] = True + else: + globals()["nan_checker"] = np.isnan + globals()["INF_AS_NA"] = False + + +def _isna_array(values: ArrayLike, inf_as_na: bool = False): + """ + Return an array indicating which values of the input array are NaN / NA. + + Parameters + ---------- + obj: ndarray or ExtensionArray + The input array whose elements are to be checked. + inf_as_na: bool + Whether or not to treat infinite values as NA. 
+ + Returns + ------- + array-like + Array of boolean values denoting the NA status of each element. + """ + dtype = values.dtype + + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + if inf_as_na and isinstance(dtype, CategoricalDtype): + result = libmissing.isnaobj(values.to_numpy(), inf_as_na=inf_as_na) + else: + # error: Incompatible types in assignment (expression has type + # "Union[ndarray[Any, Any], ExtensionArraySupportsAnyAll]", variable has + # type "ndarray[Any, dtype[bool_]]") + result = values.isna() # type: ignore[assignment] + elif isinstance(values, np.rec.recarray): + # GH 48526 + result = _isna_recarray_dtype(values, inf_as_na=inf_as_na) + elif is_string_or_object_np_dtype(values.dtype): + result = _isna_string_dtype(values, inf_as_na=inf_as_na) + elif dtype.kind in "mM": + # this is the NaT pattern + result = values.view("i8") == iNaT + else: + if inf_as_na: + result = ~np.isfinite(values) + else: + result = np.isnan(values) + + return result + + +def _isna_string_dtype(values: np.ndarray, inf_as_na: bool) -> npt.NDArray[np.bool_]: + # Working around NumPy ticket 1542 + dtype = values.dtype + + if dtype.kind in ("S", "U"): + result = np.zeros(values.shape, dtype=bool) + else: + if values.ndim in {1, 2}: + result = libmissing.isnaobj(values, inf_as_na=inf_as_na) + else: + # 0-D, reached via e.g. mask_missing + result = libmissing.isnaobj(values.ravel(), inf_as_na=inf_as_na) + result = result.reshape(values.shape) + + return result + + +def _has_record_inf_value(record_as_array: np.ndarray) -> np.bool_: + is_inf_in_record = np.zeros(len(record_as_array), dtype=bool) + for i, value in enumerate(record_as_array): + is_element_inf = False + try: + is_element_inf = np.isinf(value) + except TypeError: + is_element_inf = False + is_inf_in_record[i] = is_element_inf + + return np.any(is_inf_in_record) + + +def _isna_recarray_dtype( + values: np.rec.recarray, inf_as_na: bool +) -> npt.NDArray[np.bool_]: + result = np.zeros(values.shape, dtype=bool) + for i, record in enumerate(values): + record_as_array = np.array(record.tolist()) + does_record_contain_nan = isna_all(record_as_array) + does_record_contain_inf = False + if inf_as_na: + does_record_contain_inf = bool(_has_record_inf_value(record_as_array)) + result[i] = np.any( + np.logical_or(does_record_contain_nan, does_record_contain_inf) + ) + + return result + + +@overload +def notna(obj: Scalar) -> bool: + ... + + +@overload +def notna( + obj: ArrayLike | Index | list, +) -> npt.NDArray[np.bool_]: + ... + + +@overload +def notna(obj: NDFrameT) -> NDFrameT: + ... + + +# handle unions +@overload +def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: + ... + + +@overload +def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + ... + + +def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: + """ + Detect non-missing values for an array-like object. + + This function takes a scalar or array-like object and indicates + whether values are valid (not missing, which is ``NaN`` in numeric + arrays, ``None`` or ``NaN`` in object arrays, ``NaT`` in datetimelike). + + Parameters + ---------- + obj : array-like or object value + Object to check for *not* null or *non*-missing values. + + Returns + ------- + bool or array-like of bool + For scalar input, returns a scalar boolean. + For array input, returns an array of boolean indicating whether each + corresponding element is valid. + + See Also + -------- + isna : Boolean inverse of pandas.notna. 
+ Series.notna : Detect valid values in a Series. + DataFrame.notna : Detect valid values in a DataFrame. + Index.notna : Detect valid values in an Index. + + Examples + -------- + Scalar arguments (including strings) result in a scalar boolean. + + >>> pd.notna('dog') + True + + >>> pd.notna(pd.NA) + False + + >>> pd.notna(np.nan) + False + + ndarrays result in an ndarray of booleans. + + >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]]) + >>> array + array([[ 1., nan, 3.], + [ 4., 5., nan]]) + >>> pd.notna(array) + array([[ True, False, True], + [ True, True, False]]) + + For indexes, an ndarray of booleans is returned. + + >>> index = pd.DatetimeIndex(["2017-07-05", "2017-07-06", None, + ... "2017-07-08"]) + >>> index + DatetimeIndex(['2017-07-05', '2017-07-06', 'NaT', '2017-07-08'], + dtype='datetime64[ns]', freq=None) + >>> pd.notna(index) + array([ True, True, False, True]) + + For Series and DataFrame, the same type is returned, containing booleans. + + >>> df = pd.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) + >>> df + 0 1 2 + 0 ant bee cat + 1 dog None fly + >>> pd.notna(df) + 0 1 2 + 0 True True True + 1 True False True + + >>> pd.notna(df[1]) + 0 True + 1 False + Name: 1, dtype: bool + """ + res = isna(obj) + if isinstance(res, bool): + return not res + return ~res + + +notnull = notna + + +def array_equivalent( + left, + right, + strict_nan: bool = False, + dtype_equal: bool = False, +) -> bool: + """ + True if two arrays, left and right, have equal non-NaN elements, and NaNs + in corresponding locations. False otherwise. It is assumed that left and + right are NumPy arrays of the same dtype. The behavior of this function + (particularly with respect to NaNs) is not defined if the dtypes are + different. + + Parameters + ---------- + left, right : ndarrays + strict_nan : bool, default False + If True, consider NaN and None to be different. + dtype_equal : bool, default False + Whether `left` and `right` are known to have the same dtype + according to `is_dtype_equal`. Some methods like `BlockManager.equals`. + require that the dtypes match. Setting this to ``True`` can improve + performance, but will give different results for arrays that are + equal but different dtypes. + + Returns + ------- + b : bool + Returns True if the arrays are equivalent. + + Examples + -------- + >>> array_equivalent( + ... np.array([1, 2, np.nan]), + ... np.array([1, 2, np.nan])) + True + >>> array_equivalent( + ... np.array([1, np.nan, 2]), + ... np.array([1, 2, np.nan])) + False + """ + left, right = np.asarray(left), np.asarray(right) + + # shape compat + if left.shape != right.shape: + return False + + if dtype_equal: + # fastpath when we require that the dtypes match (Block.equals) + if left.dtype.kind in "fc": + return _array_equivalent_float(left, right) + elif left.dtype.kind in "mM": + return _array_equivalent_datetimelike(left, right) + elif is_string_or_object_np_dtype(left.dtype): + # TODO: fastpath for pandas' StringDtype + return _array_equivalent_object(left, right, strict_nan) + else: + return np.array_equal(left, right) + + # Slow path when we allow comparing different dtypes. + # Object arrays can contain None, NaN and NaT. + # string dtypes must be come to this path for NumPy 1.7.1 compat + if left.dtype.kind in "OSU" or right.dtype.kind in "OSU": + # Note: `in "OSU"` is non-trivially faster than `in ["O", "S", "U"]` + # or `in ("O", "S", "U")` + return _array_equivalent_object(left, right, strict_nan) + + # NaNs can occur in float and complex arrays. 
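+    # e.g. left = [1.0, nan] and right = [1.0, nan]: elementwise equality gives
+    # [True, False], but both arrays are NaN at position 1, so the combined
+    # check below still reports the arrays as equivalent.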
+ if left.dtype.kind in "fc": + if not (left.size and right.size): + return True + return ((left == right) | (isna(left) & isna(right))).all() + + elif left.dtype.kind in "mM" or right.dtype.kind in "mM": + # datetime64, timedelta64, Period + if left.dtype != right.dtype: + return False + + left = left.view("i8") + right = right.view("i8") + + # if we have structured dtypes, compare first + if ( + left.dtype.type is np.void or right.dtype.type is np.void + ) and left.dtype != right.dtype: + return False + + return np.array_equal(left, right) + + +def _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool: + return bool(((left == right) | (np.isnan(left) & np.isnan(right))).all()) + + +def _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray): + return np.array_equal(left.view("i8"), right.view("i8")) + + +def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool): + left = ensure_object(left) + right = ensure_object(right) + + mask: npt.NDArray[np.bool_] | None = None + if strict_nan: + mask = isna(left) & isna(right) + if not mask.any(): + mask = None + + try: + if mask is None: + return lib.array_equivalent_object(left, right) + if not lib.array_equivalent_object(left[~mask], right[~mask]): + return False + left_remaining = left[mask] + right_remaining = right[mask] + except ValueError: + # can raise a ValueError if left and right cannot be + # compared (e.g. nested arrays) + left_remaining = left + right_remaining = right + + for left_value, right_value in zip(left_remaining, right_remaining): + if left_value is NaT and right_value is not NaT: + return False + + elif left_value is libmissing.NA and right_value is not libmissing.NA: + return False + + elif isinstance(left_value, float) and np.isnan(left_value): + if not isinstance(right_value, float) or not np.isnan(right_value): + return False + else: + with warnings.catch_warnings(): + # suppress numpy's "elementwise comparison failed" + warnings.simplefilter("ignore", DeprecationWarning) + try: + if np.any(np.asarray(left_value != right_value)): + return False + except TypeError as err: + if "boolean value of NA is ambiguous" in str(err): + return False + raise + except ValueError: + # numpy can raise a ValueError if left and right cannot be + # compared (e.g. nested arrays) + return False + return True + + +def array_equals(left: ArrayLike, right: ArrayLike) -> bool: + """ + ExtensionArray-compatible implementation of array_equivalent. 
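+
+    Unlike ``array_equivalent``, this requires the dtypes to match exactly and
+    dispatches to ``ExtensionArray.equals`` for extension arrays.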
+ """ + if left.dtype != right.dtype: + return False + elif isinstance(left, ABCExtensionArray): + return left.equals(right) + else: + return array_equivalent(left, right, dtype_equal=True) + + +def infer_fill_value(val): + """ + infer the fill value for the nan/NaT from the provided + scalar/ndarray/list-like if we are a NaT, return the correct dtyped + element to provide proper block construction + """ + if not is_list_like(val): + val = [val] + val = np.asarray(val) + if val.dtype.kind in "mM": + return np.array("NaT", dtype=val.dtype) + elif val.dtype == object: + dtype = lib.infer_dtype(ensure_object(val), skipna=False) + if dtype in ["datetime", "datetime64"]: + return np.array("NaT", dtype=DT64NS_DTYPE) + elif dtype in ["timedelta", "timedelta64"]: + return np.array("NaT", dtype=TD64NS_DTYPE) + return np.array(np.nan, dtype=object) + elif val.dtype.kind == "U": + return np.array(np.nan, dtype=val.dtype) + return np.nan + + +def construct_1d_array_from_inferred_fill_value( + value: object, length: int +) -> ArrayLike: + # Find our empty_value dtype by constructing an array + # from our value and doing a .take on it + from pandas.core.algorithms import take_nd + from pandas.core.construction import sanitize_array + from pandas.core.indexes.base import Index + + arr = sanitize_array(value, Index(range(1)), copy=False) + taker = -1 * np.ones(length, dtype=np.intp) + return take_nd(arr, taker) + + +def maybe_fill(arr: np.ndarray) -> np.ndarray: + """ + Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype. + """ + if arr.dtype.kind not in "iub": + arr.fill(np.nan) + return arr + + +def na_value_for_dtype(dtype: DtypeObj, compat: bool = True): + """ + Return a dtype compat na value + + Parameters + ---------- + dtype : string / dtype + compat : bool, default True + + Returns + ------- + np.dtype or a pandas dtype + + Examples + -------- + >>> na_value_for_dtype(np.dtype('int64')) + 0 + >>> na_value_for_dtype(np.dtype('int64'), compat=False) + nan + >>> na_value_for_dtype(np.dtype('float64')) + nan + >>> na_value_for_dtype(np.dtype('bool')) + False + >>> na_value_for_dtype(np.dtype('datetime64[ns]')) + numpy.datetime64('NaT') + """ + + if isinstance(dtype, ExtensionDtype): + return dtype.na_value + elif dtype.kind in "mM": + unit = np.datetime_data(dtype)[0] + return dtype.type("NaT", unit) + elif dtype.kind == "f": + return np.nan + elif dtype.kind in "iu": + if compat: + return 0 + return np.nan + elif dtype.kind == "b": + if compat: + return False + return np.nan + return np.nan + + +def remove_na_arraylike(arr: Series | Index | np.ndarray): + """ + Return array-like containing only true/non-NaN values, possibly empty. + """ + if isinstance(arr.dtype, ExtensionDtype): + return arr[notna(arr)] + else: + return arr[notna(np.asarray(arr))] + + +def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool: + """ + isna check that excludes incompatible dtypes + + Parameters + ---------- + obj : object + dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype + + Returns + ------- + bool + """ + if not lib.is_scalar(obj) or not isna(obj): + return False + elif dtype.kind == "M": + if isinstance(dtype, np.dtype): + # i.e. 
not tzaware + return not isinstance(obj, (np.timedelta64, Decimal)) + # we have to rule out tznaive dt64("NaT") + return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal)) + elif dtype.kind == "m": + return not isinstance(obj, (np.datetime64, Decimal)) + elif dtype.kind in "iufc": + # Numeric + return obj is not NaT and not isinstance(obj, (np.datetime64, np.timedelta64)) + elif dtype.kind == "b": + # We allow pd.NA, None, np.nan in BooleanArray (same as IntervalDtype) + return lib.is_float(obj) or obj is None or obj is libmissing.NA + + elif dtype == _dtype_str: + # numpy string dtypes to avoid float np.nan + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float)) + + elif dtype == _dtype_object: + # This is needed for Categorical, but is kind of weird + return True + + elif isinstance(dtype, PeriodDtype): + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) + + elif isinstance(dtype, IntervalDtype): + return lib.is_float(obj) or obj is None or obj is libmissing.NA + + elif isinstance(dtype, CategoricalDtype): + return is_valid_na_for_dtype(obj, dtype.categories.dtype) + + # fallback, default to allowing NaN, None, NA, NaT + return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) + + +def isna_all(arr: ArrayLike) -> bool: + """ + Optimized equivalent to isna(arr).all() + """ + total_len = len(arr) + + # Usually it's enough to check but a small fraction of values to see if + # a block is NOT null, chunks should help in such cases. + # parameters 1000 and 40 were chosen arbitrarily + chunk_len = max(total_len // 40, 1000) + + dtype = arr.dtype + if lib.is_np_dtype(dtype, "f"): + checker = nan_checker + + elif (lib.is_np_dtype(dtype, "mM")) or isinstance( + dtype, (DatetimeTZDtype, PeriodDtype) + ): + # error: Incompatible types in assignment (expression has type + # "Callable[[Any], Any]", variable has type "ufunc") + checker = lambda x: np.asarray(x.view("i8")) == iNaT # type: ignore[assignment] + + else: + # error: Incompatible types in assignment (expression has type "Callable[[Any], + # Any]", variable has type "ufunc") + checker = lambda x: _isna_array( # type: ignore[assignment] + x, inf_as_na=INF_AS_NA + ) + + return all( + checker(arr[i : i + chunk_len]).all() for i in range(0, total_len, chunk_len) + ) diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/groupby/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8248f378e2c1acea37bdc2d41065c591360b902a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/groupby/__init__.py @@ -0,0 +1,15 @@ +from pandas.core.groupby.generic import ( + DataFrameGroupBy, + NamedAgg, + SeriesGroupBy, +) +from pandas.core.groupby.groupby import GroupBy +from pandas.core.groupby.grouper import Grouper + +__all__ = [ + "DataFrameGroupBy", + "NamedAgg", + "SeriesGroupBy", + "GroupBy", + "Grouper", +] diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8f3d8a8078515865deb09cab7f67eb576108695 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c84a7d19eef08bfaefc15ece22ed946b34c8237 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2facae0aa37b0f98e0da58e89229a034df8e931f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c275b9bc7b1a333571ccab83952d73a7b620304d Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/groupby.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dab8a262fe0115e802163c0a929a96a9c6665984 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28a9456aab8208d7f37eccd2e10e478a8224cec0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db448720b7f22ed7e94f1e168f7088d3ceca9f68 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3aa12cfcc321c103beab019004f765eb925a0096 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/base.py b/venv/lib/python3.10/site-packages/pandas/core/groupby/base.py new file mode 100644 index 0000000000000000000000000000000000000000..a443597347283887deb9cbd3eafb5f6d3bb6d9a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/groupby/base.py @@ -0,0 +1,121 @@ +""" +Provide basic components for groupby. 
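+
+This includes the OutputKey dataclass and the frozenset allow-lists of
+reduction, transformation and other groupby kernel names.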
+""" +from __future__ import annotations + +import dataclasses +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Hashable + + +@dataclasses.dataclass(order=True, frozen=True) +class OutputKey: + label: Hashable + position: int + + +# special case to prevent duplicate plots when catching exceptions when +# forwarding methods from NDFrames +plotting_methods = frozenset(["plot", "hist"]) + +# cythonized transformations or canned "agg+broadcast", which do not +# require postprocessing of the result by transform. +cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"]) + +# List of aggregation/reduction functions. +# These map each group to a single numeric value +reduction_kernels = frozenset( + [ + "all", + "any", + "corrwith", + "count", + "first", + "idxmax", + "idxmin", + "last", + "max", + "mean", + "median", + "min", + "nunique", + "prod", + # as long as `quantile`'s signature accepts only + # a single quantile value, it's a reduction. + # GH#27526 might change that. + "quantile", + "sem", + "size", + "skew", + "std", + "sum", + "var", + ] +) + +# List of transformation functions. +# a transformation is a function that, for each group, +# produces a result that has the same shape as the group. + + +transformation_kernels = frozenset( + [ + "bfill", + "cumcount", + "cummax", + "cummin", + "cumprod", + "cumsum", + "diff", + "ffill", + "fillna", + "ngroup", + "pct_change", + "rank", + "shift", + ] +) + +# these are all the public methods on Grouper which don't belong +# in either of the above lists +groupby_other_methods = frozenset( + [ + "agg", + "aggregate", + "apply", + "boxplot", + # corr and cov return ngroups*ncolumns rows, so they + # are neither a transformation nor a reduction + "corr", + "cov", + "describe", + "dtypes", + "expanding", + "ewm", + "filter", + "get_group", + "groups", + "head", + "hist", + "indices", + "ndim", + "ngroups", + "nth", + "ohlc", + "pipe", + "plot", + "resample", + "rolling", + "tail", + "take", + "transform", + "sample", + "value_counts", + ] +) +# Valid values of `name` for `groupby.transform(name)` +# NOTE: do NOT edit this directly. New additions should be inserted +# into the appropriate list above. +transform_kernel_allowlist = reduction_kernels | transformation_kernels diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/categorical.py b/venv/lib/python3.10/site-packages/pandas/core/groupby/categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..6ab98cf4fe55e9b064db99e61d1245cb83b63dc1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/groupby/categorical.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import numpy as np + +from pandas.core.algorithms import unique1d +from pandas.core.arrays.categorical import ( + Categorical, + CategoricalDtype, + recode_for_categories, +) + + +def recode_for_groupby( + c: Categorical, sort: bool, observed: bool +) -> tuple[Categorical, Categorical | None]: + """ + Code the categories to ensure we can groupby for categoricals. + + If observed=True, we return a new Categorical with the observed + categories only. + + If sort=False, return a copy of self, coded with categories as + returned by .unique(), followed by any categories not appearing in + the data. If sort=True, return self. + + This method is needed solely to ensure the categorical index of the + GroupBy result has categories in the order of appearance in the data + (GH-8868). 
+ + Parameters + ---------- + c : Categorical + sort : bool + The value of the sort parameter groupby was called with. + observed : bool + Account only for the observed values + + Returns + ------- + Categorical + If sort=False, the new categories are set to the order of + appearance in codes (unless ordered=True, in which case the + original order is preserved), followed by any unrepresented + categories in the original order. + Categorical or None + If we are observed, return the original categorical, otherwise None + """ + # we only care about observed values + if observed: + # In cases with c.ordered, this is equivalent to + # return c.remove_unused_categories(), c + + unique_codes = unique1d(c.codes) + + take_codes = unique_codes[unique_codes != -1] + if sort: + take_codes = np.sort(take_codes) + + # we recode according to the uniques + categories = c.categories.take(take_codes) + codes = recode_for_categories(c.codes, c.categories, categories) + + # return a new categorical that maps our new codes + # and categories + dtype = CategoricalDtype(categories, ordered=c.ordered) + return Categorical._simple_new(codes, dtype=dtype), c + + # Already sorted according to c.categories; all is fine + if sort: + return c, None + + # sort=False should order groups in as-encountered order (GH-8868) + + # xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories + all_codes = np.arange(c.categories.nunique()) + # GH 38140: exclude nan from indexer for categories + unique_notnan_codes = unique1d(c.codes[c.codes != -1]) + if sort: + unique_notnan_codes = np.sort(unique_notnan_codes) + if len(all_codes) > len(unique_notnan_codes): + # GH 13179: All categories need to be present, even if missing from the data + missing_codes = np.setdiff1d(all_codes, unique_notnan_codes, assume_unique=True) + take_codes = np.concatenate((unique_notnan_codes, missing_codes)) + else: + take_codes = unique_notnan_codes + + return Categorical(c, c.unique().categories.take(take_codes)), None diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/generic.py b/venv/lib/python3.10/site-packages/pandas/core/groupby/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..f2e314046fb749e7d32bf7fc76ca22eb1194a328 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/groupby/generic.py @@ -0,0 +1,2852 @@ +""" +Define the SeriesGroupBy and DataFrameGroupBy +classes that hold the groupby interfaces (and some implementations). + +These are user facing as the result of the ``df.groupby(...)`` operations, +which here returns a DataFrameGroupBy object. 
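+Selecting a single column from that result, e.g. ``df.groupby(...)["col"]``,
+yields a SeriesGroupBy object instead.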
+""" +from __future__ import annotations + +from collections import abc +from functools import partial +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + NamedTuple, + TypeVar, + Union, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import ( + Interval, + lib, +) +from pandas._libs.hashtable import duplicated +from pandas.errors import SpecificationError +from pandas.util._decorators import ( + Appender, + Substitution, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + ensure_int64, + is_bool, + is_dict_like, + is_integer_dtype, + is_list_like, + is_numeric_dtype, + is_scalar, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + IntervalDtype, +) +from pandas.core.dtypes.inference import is_hashable +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import algorithms +from pandas.core.apply import ( + GroupByApply, + maybe_mangle_lambdas, + reconstruct_func, + validate_func_kwargs, + warn_alias_replacement, +) +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.core.groupby import ( + base, + ops, +) +from pandas.core.groupby.groupby import ( + GroupBy, + GroupByPlot, + _agg_template_frame, + _agg_template_series, + _apply_docs, + _transform_template, +) +from pandas.core.indexes.api import ( + Index, + MultiIndex, + all_indexes_same, + default_index, +) +from pandas.core.series import Series +from pandas.core.sorting import get_group_index +from pandas.core.util.numba_ import maybe_use_numba + +from pandas.plotting import boxplot_frame_groupby + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Mapping, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + Axis, + AxisInt, + CorrelationMethod, + FillnaOptions, + IndexLabel, + Manager, + Manager2D, + SingleManager, + TakeIndexer, + ) + + from pandas import Categorical + from pandas.core.generic import NDFrame + +# TODO(typing) the return value on this callable should be any *scalar*. +AggScalar = Union[str, Callable[..., Any]] +# TODO: validate types on ScalarResult and move to _typing +# Blocked from using by https://github.com/python/mypy/issues/1484 +# See note at _mangle_lambda_list +ScalarResult = TypeVar("ScalarResult") + + +class NamedAgg(NamedTuple): + """ + Helper for column specific aggregation with control over output column names. + + Subclass of typing.NamedTuple. + + Parameters + ---------- + column : Hashable + Column label in the DataFrame to apply aggfunc. + aggfunc : function or str + Function to apply to the provided column. If string, the name of a built-in + pandas function. 
+ + Examples + -------- + >>> df = pd.DataFrame({"key": [1, 1, 2], "a": [-1, 0, 1], 1: [10, 11, 12]}) + >>> agg_a = pd.NamedAgg(column="a", aggfunc="min") + >>> agg_1 = pd.NamedAgg(column=1, aggfunc=lambda x: np.mean(x)) + >>> df.groupby("key").agg(result_a=agg_a, result_1=agg_1) + result_a result_1 + key + 1 -1 10.5 + 2 1 12.0 + """ + + column: Hashable + aggfunc: AggScalar + + +class SeriesGroupBy(GroupBy[Series]): + def _wrap_agged_manager(self, mgr: Manager) -> Series: + out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes) + out._name = self.obj.name + return out + + def _get_data_to_aggregate( + self, *, numeric_only: bool = False, name: str | None = None + ) -> SingleManager: + ser = self._obj_with_exclusions + single = ser._mgr + if numeric_only and not is_numeric_dtype(ser.dtype): + # GH#41291 match Series behavior + kwd_name = "numeric_only" + raise TypeError( + f"Cannot use {kwd_name}=True with " + f"{type(self).__name__}.{name} and non-numeric dtypes." + ) + return single + + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> s = pd.Series([1, 2, 3, 4]) + + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + dtype: int64 + + >>> s.groupby([1, 1, 2, 2]).min() + 1 1 + 2 3 + dtype: int64 + + >>> s.groupby([1, 1, 2, 2]).agg('min') + 1 1 + 2 3 + dtype: int64 + + >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max']) + min max + 1 1 2 + 2 3 4 + + The output column names can be controlled by passing + the desired column names and aggregations as keyword arguments. + + >>> s.groupby([1, 1, 2, 2]).agg( + ... minimum='min', + ... maximum='max', + ... ) + minimum maximum + 1 1 2 + 2 3 4 + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the aggregating function. + + >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min()) + 1 1.0 + 2 3.0 + dtype: float64 + """ + ) + + @Appender( + _apply_docs["template"].format( + input="series", examples=_apply_docs["series_examples"] + ) + ) + def apply(self, func, *args, **kwargs) -> Series: + return super().apply(func, *args, **kwargs) + + @doc(_agg_template_series, examples=_agg_examples_doc, klass="Series") + def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): + relabeling = func is None + columns = None + if relabeling: + columns, func = validate_func_kwargs(kwargs) + kwargs = {} + + if isinstance(func, str): + if maybe_use_numba(engine) and engine is not None: + # Not all agg functions support numba, only propagate numba kwargs + # if user asks for numba, and engine is not None + # (if engine is None, the called function will handle the case where + # numba is requested via the global option) + kwargs["engine"] = engine + if engine_kwargs is not None: + kwargs["engine_kwargs"] = engine_kwargs + return getattr(self, func)(*args, **kwargs) + + elif isinstance(func, abc.Iterable): + # Catch instances of lists / tuples + # but not the class list / tuple itself. 
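+            # e.g. ser.groupby(key).agg(["min", "max"]) and
+            # ser.groupby(key).agg([("low", "min"), ("high", "max")]) both take
+            # this path and are dispatched to _aggregate_multiple_funcs below.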
+ func = maybe_mangle_lambdas(func) + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + ret = self._aggregate_multiple_funcs(func, *args, **kwargs) + if relabeling: + # columns is not narrowed by mypy from relabeling flag + assert columns is not None # for mypy + ret.columns = columns + if not self.as_index: + ret = ret.reset_index() + return ret + + else: + cyfunc = com.get_cython_func(func) + if cyfunc and not args and not kwargs: + warn_alias_replacement(self, func, cyfunc) + return getattr(self, cyfunc)() + + if maybe_use_numba(engine): + return self._aggregate_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + + if self.ngroups == 0: + # e.g. test_evaluate_with_empty_groups without any groups to + # iterate over, we have no output on which to do dtype + # inference. We default to using the existing dtype. + # xref GH#51445 + obj = self._obj_with_exclusions + return self.obj._constructor( + [], + name=self.obj.name, + index=self._grouper.result_index, + dtype=obj.dtype, + ) + + if self._grouper.nkeys > 1: + return self._python_agg_general(func, *args, **kwargs) + + try: + return self._python_agg_general(func, *args, **kwargs) + except KeyError: + # KeyError raised in test_groupby.test_basic is bc the func does + # a dictionary lookup on group.name, but group name is not + # pinned in _python_agg_general, only in _aggregate_named + result = self._aggregate_named(func, *args, **kwargs) + + warnings.warn( + "Pinning the groupby key to each group in " + f"{type(self).__name__}.agg is deprecated, and cases that " + "relied on it will raise in a future version. " + "If your operation requires utilizing the groupby keys, " + "iterate over the groupby object instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + # result is a dict whose keys are the elements of result_index + result = Series(result, index=self._grouper.result_index) + result = self._wrap_aggregated_output(result) + return result + + agg = aggregate + + def _python_agg_general(self, func, *args, **kwargs): + orig_func = func + func = com.is_builtin_func(func) + if orig_func != func: + alias = com._builtin_table_alias[func] + warn_alias_replacement(self, orig_func, alias) + f = lambda x: func(x, *args, **kwargs) + + obj = self._obj_with_exclusions + result = self._grouper.agg_series(obj, f) + res = obj._constructor(result, name=obj.name) + return self._wrap_aggregated_output(res) + + def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame: + if isinstance(arg, dict): + if self.as_index: + # GH 15931 + raise SpecificationError("nested renamer is not supported") + else: + # GH#50684 - This accidentally worked in 1.x + msg = ( + "Passing a dictionary to SeriesGroupBy.agg is deprecated " + "and will raise in a future version of pandas. Pass a list " + "of aggregations instead." 
+ ) + warnings.warn( + message=msg, + category=FutureWarning, + stacklevel=find_stack_level(), + ) + arg = list(arg.items()) + elif any(isinstance(x, (tuple, list)) for x in arg): + arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg] + else: + # list of functions / function names + columns = (com.get_callable_name(f) or f for f in arg) + arg = zip(columns, arg) + + results: dict[base.OutputKey, DataFrame | Series] = {} + with com.temp_setattr(self, "as_index", True): + # Combine results using the index, need to adjust index after + # if as_index=False (GH#50724) + for idx, (name, func) in enumerate(arg): + key = base.OutputKey(label=name, position=idx) + results[key] = self.aggregate(func, *args, **kwargs) + + if any(isinstance(x, DataFrame) for x in results.values()): + from pandas import concat + + res_df = concat( + results.values(), axis=1, keys=[key.label for key in results] + ) + return res_df + + indexed_output = {key.position: val for key, val in results.items()} + output = self.obj._constructor_expanddim(indexed_output, index=None) + output.columns = Index(key.label for key in results) + + return output + + def _wrap_applied_output( + self, + data: Series, + values: list[Any], + not_indexed_same: bool = False, + is_transform: bool = False, + ) -> DataFrame | Series: + """ + Wrap the output of SeriesGroupBy.apply into the expected result. + + Parameters + ---------- + data : Series + Input data for groupby operation. + values : List[Any] + Applied output for each group. + not_indexed_same : bool, default False + Whether the applied outputs are not indexed the same as the group axes. + + Returns + ------- + DataFrame or Series + """ + if len(values) == 0: + # GH #6265 + if is_transform: + # GH#47787 see test_group_on_empty_multiindex + res_index = data.index + else: + res_index = self._grouper.result_index + + return self.obj._constructor( + [], + name=self.obj.name, + index=res_index, + dtype=data.dtype, + ) + assert values is not None + + if isinstance(values[0], dict): + # GH #823 #24880 + index = self._grouper.result_index + res_df = self.obj._constructor_expanddim(values, index=index) + res_df = self._reindex_output(res_df) + # if self.observed is False, + # keep all-NaN rows created while re-indexing + res_ser = res_df.stack(future_stack=True) + res_ser.name = self.obj.name + return res_ser + elif isinstance(values[0], (Series, DataFrame)): + result = self._concat_objects( + values, + not_indexed_same=not_indexed_same, + is_transform=is_transform, + ) + if isinstance(result, Series): + result.name = self.obj.name + if not self.as_index and not_indexed_same: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + return result + else: + # GH #6265 #24880 + result = self.obj._constructor( + data=values, index=self._grouper.result_index, name=self.obj.name + ) + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + return self._reindex_output(result) + + def _aggregate_named(self, func, *args, **kwargs): + # Note: this is very similar to _aggregate_series_pure_python, + # but that does not pin group.name + result = {} + initialized = False + + for name, group in self._grouper.get_iterator( + self._obj_with_exclusions, axis=self.axis + ): + # needed for pandas/tests/groupby/test_groupby.py::test_basic_aggregations + object.__setattr__(group, "name", name) + + output = func(group, *args, **kwargs) + output = ops.extract_result(output) + if not initialized: + # We 
only do this validation on the first iteration + ops.check_result_array(output, group.dtype) + initialized = True + result[name] = output + + return result + + __examples_series_doc = dedent( + """ + >>> ser = pd.Series([390.0, 350.0, 30.0, 20.0], + ... index=["Falcon", "Falcon", "Parrot", "Parrot"], + ... name="Max Speed") + >>> grouped = ser.groupby([1, 1, 2, 2]) + >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) + Falcon 0.707107 + Falcon -0.707107 + Parrot 0.707107 + Parrot -0.707107 + Name: Max Speed, dtype: float64 + + Broadcast result of the transformation + + >>> grouped.transform(lambda x: x.max() - x.min()) + Falcon 40.0 + Falcon 40.0 + Parrot 10.0 + Parrot 10.0 + Name: Max Speed, dtype: float64 + + >>> grouped.transform("mean") + Falcon 370.0 + Falcon 370.0 + Parrot 25.0 + Parrot 25.0 + Name: Max Speed, dtype: float64 + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + for example: + + >>> grouped.transform(lambda x: x.astype(int).max()) + Falcon 390 + Falcon 390 + Parrot 30 + Parrot 30 + Name: Max Speed, dtype: int64 + """ + ) + + @Substitution(klass="Series", example=__examples_series_doc) + @Appender(_transform_template) + def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + return self._transform( + func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs + ) + + def _cython_transform( + self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs + ): + assert axis == 0 # handled by caller + + obj = self._obj_with_exclusions + + try: + result = self._grouper._cython_operation( + "transform", obj._values, how, axis, **kwargs + ) + except NotImplementedError as err: + # e.g. test_groupby_raises_string + raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err + + return obj._constructor(result, index=self.obj.index, name=obj.name) + + def _transform_general( + self, func: Callable, engine, engine_kwargs, *args, **kwargs + ) -> Series: + """ + Transform with a callable `func`. + """ + if maybe_use_numba(engine): + return self._transform_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + assert callable(func) + klass = type(self.obj) + + results = [] + for name, group in self._grouper.get_iterator( + self._obj_with_exclusions, axis=self.axis + ): + # this setattr is needed for test_transform_lambda_with_datetimetz + object.__setattr__(group, "name", name) + res = func(group, *args, **kwargs) + + results.append(klass(res, index=group.index)) + + # check for empty "results" to avoid concat ValueError + if results: + from pandas.core.reshape.concat import concat + + concatenated = concat(results) + result = self._set_result_index_ordered(concatenated) + else: + result = self.obj._constructor(dtype=np.float64) + + result.name = self.obj.name + return result + + def filter(self, func, dropna: bool = True, *args, **kwargs): + """ + Filter elements from groups that don't satisfy a criterion. + + Elements from groups are filtered if they do not satisfy the + boolean criterion specified by func. + + Parameters + ---------- + func : function + Criterion to apply to each group. Should return True or False. + dropna : bool + Drop groups that do not pass the filter. True by default; if False, + groups that evaluate False are filled with NaNs. + + Returns + ------- + Series + + Notes + ----- + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. 
See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + ... 'foo', 'bar'], + ... 'B' : [1, 2, 3, 4, 5, 6], + ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) + >>> grouped = df.groupby('A') + >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) + 1 2 + 3 4 + 5 6 + Name: B, dtype: int64 + """ + if isinstance(func, str): + wrapper = lambda x: getattr(x, func)(*args, **kwargs) + else: + wrapper = lambda x: func(x, *args, **kwargs) + + # Interpret np.nan as False. + def true_and_notna(x) -> bool: + b = wrapper(x) + return notna(b) and b + + try: + indices = [ + self._get_index(name) + for name, group in self._grouper.get_iterator( + self._obj_with_exclusions, axis=self.axis + ) + if true_and_notna(group) + ] + except (ValueError, TypeError) as err: + raise TypeError("the filter must return a boolean result") from err + + filtered = self._apply_filter(indices, dropna) + return filtered + + def nunique(self, dropna: bool = True) -> Series | DataFrame: + """ + Return number of unique elements in the group. + + Returns + ------- + Series + Number of unique values within each group. + + Examples + -------- + For SeriesGroupby: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).nunique() + a 2 + b 1 + dtype: int64 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 3], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 3 + dtype: int64 + >>> ser.resample('MS').nunique() + 2023-01-01 2 + 2023-02-01 1 + Freq: MS, dtype: int64 + """ + ids, _, ngroups = self._grouper.group_info + val = self.obj._values + codes, uniques = algorithms.factorize(val, use_na_sentinel=dropna, sort=False) + + if self._grouper.has_dropped_na: + mask = ids >= 0 + ids = ids[mask] + codes = codes[mask] + + group_index = get_group_index( + labels=[ids, codes], + shape=(ngroups, len(uniques)), + sort=False, + xnull=dropna, + ) + + if dropna: + mask = group_index >= 0 + if (~mask).any(): + ids = ids[mask] + group_index = group_index[mask] + + mask = duplicated(group_index, "first") + res = np.bincount(ids[~mask], minlength=ngroups) + res = ensure_int64(res) + + ri = self._grouper.result_index + result: Series | DataFrame = self.obj._constructor( + res, index=ri, name=self.obj.name + ) + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + return self._reindex_output(result, fill_value=0) + + @doc(Series.describe) + def describe(self, percentiles=None, include=None, exclude=None) -> Series: + return super().describe( + percentiles=percentiles, include=include, exclude=exclude + ) + + def value_counts( + self, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + bins=None, + dropna: bool = True, + ) -> Series | DataFrame: + name = "proportion" if normalize else "count" + + if bins is None: + result = self._value_counts( + normalize=normalize, sort=sort, ascending=ascending, dropna=dropna + ) + result.name = name + return result + + from pandas.core.reshape.merge import get_join_indexers + from pandas.core.reshape.tile import cut + + ids, _, _ = self._grouper.group_info + val = self.obj._values + + index_names = self._grouper.names + [self.obj.name] + + if isinstance(val.dtype, CategoricalDtype) or ( + bins is not None and not np.iterable(bins) + ): + # scalar bins cannot 
be done at top level + # in a backward compatible way + # GH38672 relates to categorical dtype + ser = self.apply( + Series.value_counts, + normalize=normalize, + sort=sort, + ascending=ascending, + bins=bins, + ) + ser.name = name + ser.index.names = index_names + return ser + + # groupby removes null keys from groupings + mask = ids != -1 + ids, val = ids[mask], val[mask] + + lab: Index | np.ndarray + if bins is None: + lab, lev = algorithms.factorize(val, sort=True) + llab = lambda lab, inc: lab[inc] + else: + # lab is a Categorical with categories an IntervalIndex + cat_ser = cut(Series(val, copy=False), bins, include_lowest=True) + cat_obj = cast("Categorical", cat_ser._values) + lev = cat_obj.categories + lab = lev.take( + cat_obj.codes, + allow_fill=True, + fill_value=lev._na_value, + ) + llab = lambda lab, inc: lab[inc]._multiindex.codes[-1] + + if isinstance(lab.dtype, IntervalDtype): + # TODO: should we do this inside II? + lab_interval = cast(Interval, lab) + + sorter = np.lexsort((lab_interval.left, lab_interval.right, ids)) + else: + sorter = np.lexsort((lab, ids)) + + ids, lab = ids[sorter], lab[sorter] + + # group boundaries are where group ids change + idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0] + idx = np.r_[0, idchanges] + if not len(ids): + idx = idchanges + + # new values are where sorted labels change + lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1)) + inc = np.r_[True, lchanges] + if not len(val): + inc = lchanges + inc[idx] = True # group boundaries are also new values + out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts + + # num. of times each group should be repeated + rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx)) + + # multi-index components + codes = self._grouper.reconstructed_codes + codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)] + levels = [ping._group_index for ping in self._grouper.groupings] + [lev] + + if dropna: + mask = codes[-1] != -1 + if mask.all(): + dropna = False + else: + out, codes = out[mask], [level_codes[mask] for level_codes in codes] + + if normalize: + out = out.astype("float") + d = np.diff(np.r_[idx, len(ids)]) + if dropna: + m = ids[lab == -1] + np.add.at(d, m, -1) + acc = rep(d)[mask] + else: + acc = rep(d) + out /= acc + + if sort and bins is None: + cat = ids[inc][mask] if dropna else ids[inc] + sorter = np.lexsort((out if ascending else -out, cat)) + out, codes[-1] = out[sorter], codes[-1][sorter] + + if bins is not None: + # for compat. 
with libgroupby.value_counts need to ensure every + # bin is present at every index level, null filled with zeros + diff = np.zeros(len(out), dtype="bool") + for level_codes in codes[:-1]: + diff |= np.r_[True, level_codes[1:] != level_codes[:-1]] + + ncat, nbin = diff.sum(), len(levels[-1]) + + left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)] + + right = [diff.cumsum() - 1, codes[-1]] + + # error: Argument 1 to "get_join_indexers" has incompatible type + # "List[ndarray[Any, Any]]"; expected "List[Union[Union[ExtensionArray, + # ndarray[Any, Any]], Index, Series]] + _, idx = get_join_indexers( + left, right, sort=False, how="left" # type: ignore[arg-type] + ) + if idx is not None: + out = np.where(idx != -1, out[idx], 0) + + if sort: + sorter = np.lexsort((out if ascending else -out, left[0])) + out, left[-1] = out[sorter], left[-1][sorter] + + # build the multi-index w/ full levels + def build_codes(lev_codes: np.ndarray) -> np.ndarray: + return np.repeat(lev_codes[diff], nbin) + + codes = [build_codes(lev_codes) for lev_codes in codes[:-1]] + codes.append(left[-1]) + + mi = MultiIndex( + levels=levels, codes=codes, names=index_names, verify_integrity=False + ) + + if is_integer_dtype(out.dtype): + out = ensure_int64(out) + result = self.obj._constructor(out, index=mi, name=name) + if not self.as_index: + result = result.reset_index() + return result + + def fillna( + self, + value: object | ArrayLike | None = None, + method: FillnaOptions | None = None, + axis: Axis | None | lib.NoDefault = lib.no_default, + inplace: bool = False, + limit: int | None = None, + downcast: dict | None | lib.NoDefault = lib.no_default, + ) -> Series | None: + """ + Fill NA/NaN values using the specified method within groups. + + .. deprecated:: 2.2.0 + This method is deprecated and will be removed in a future version. + Use the :meth:`.SeriesGroupBy.ffill` or :meth:`.SeriesGroupBy.bfill` + for forward or backward filling instead. If you want to fill with a + single value, use :meth:`Series.fillna` instead. + + Parameters + ---------- + value : scalar, dict, Series, or DataFrame + Value to use to fill holes (e.g. 0), alternately a + dict/Series/DataFrame of values specifying which value to use for + each index (for a Series) or column (for a DataFrame). Values not + in the dict/Series/DataFrame will not be filled. This value cannot + be a list. Users wanting to use the ``value`` argument and not ``method`` + should prefer :meth:`.Series.fillna` as this + will produce the same result and be more performant. + method : {{'bfill', 'ffill', None}}, default None + Method to use for filling holes. ``'ffill'`` will propagate + the last valid observation forward within a group. + ``'bfill'`` will use next valid observation to fill the gap. + axis : {0 or 'index', 1 or 'columns'} + Unused, only for compatibility with :meth:`DataFrameGroupBy.fillna`. + inplace : bool, default False + Broken. Do not set to True. + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill within a group. In other words, + if there is a gap with more than this number of consecutive NaNs, + it will only be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. 
+ downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + Returns + ------- + Series + Object with missing values filled within groups. + + See Also + -------- + ffill : Forward fill values within a group. + bfill : Backward fill values within a group. + + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['cat', 'cat', 'cat', 'mouse', 'mouse'] + >>> ser = pd.Series([1, None, None, 2, None], index=lst) + >>> ser + cat 1.0 + cat NaN + cat NaN + mouse 2.0 + mouse NaN + dtype: float64 + >>> ser.groupby(level=0).fillna(0, limit=1) + cat 1.0 + cat 0.0 + cat NaN + mouse 2.0 + mouse 0.0 + dtype: float64 + """ + warnings.warn( + f"{type(self).__name__}.fillna is deprecated and " + "will be removed in a future version. Use obj.ffill() or obj.bfill() " + "for forward or backward filling instead. If you want to fill with a " + f"single value, use {type(self.obj).__name__}.fillna instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + result = self._op_via_apply( + "fillna", + value=value, + method=method, + axis=axis, + inplace=inplace, + limit=limit, + downcast=downcast, + ) + return result + + def take( + self, + indices: TakeIndexer, + axis: Axis | lib.NoDefault = lib.no_default, + **kwargs, + ) -> Series: + """ + Return the elements in the given *positional* indices in each group. + + This means that we are not indexing according to actual values in + the index attribute of the object. We are indexing according to the + actual position of the element in the object. + + If a requested index does not exist for some group, this method will raise. + To get similar behavior that ignores indices that don't exist, see + :meth:`.SeriesGroupBy.nth`. + + Parameters + ---------- + indices : array-like + An array of ints indicating which positions to take in each group. + axis : {0 or 'index', 1 or 'columns', None}, default 0 + The axis on which to select elements. ``0`` means that we are + selecting rows, ``1`` means that we are selecting columns. + For `SeriesGroupBy` this parameter is unused and defaults to 0. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + **kwargs + For compatibility with :meth:`numpy.take`. Has no effect on the + output. + + Returns + ------- + Series + A Series containing the elements taken from each group. + + See Also + -------- + Series.take : Take elements from a Series along an axis. + Series.loc : Select a subset of a DataFrame by labels. + Series.iloc : Select a subset of a DataFrame by positions. + numpy.take : Take elements from an array along an axis. + SeriesGroupBy.nth : Similar to take, won't raise if indices don't exist. + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan), + ... ('rabbit', 'mammal', 15.0)], + ... columns=['name', 'class', 'max_speed'], + ... index=[4, 3, 2, 1, 0]) + >>> df + name class max_speed + 4 falcon bird 389.0 + 3 parrot bird 24.0 + 2 lion mammal 80.5 + 1 monkey mammal NaN + 0 rabbit mammal 15.0 + >>> gb = df["name"].groupby([1, 1, 2, 2, 2]) + + Take elements at positions 0 and 1 along the axis 0 in each group (default). 
+ + >>> gb.take([0, 1]) + 1 4 falcon + 3 parrot + 2 2 lion + 1 monkey + Name: name, dtype: object + + We may take elements using negative integers for positive indices, + starting from the end of the object, just like with Python lists. + + >>> gb.take([-1, -2]) + 1 3 parrot + 4 falcon + 2 0 rabbit + 1 monkey + Name: name, dtype: object + """ + result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs) + return result + + def skew( + self, + axis: Axis | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ) -> Series: + """ + Return unbiased skew within groups. + + Normalized by N-1. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns', None}, default 0 + Axis for the function to be applied on. + This parameter is only for compatibility with DataFrame and is unused. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values when computing the result. + + numeric_only : bool, default False + Include only float, int, boolean columns. Not implemented for Series. + + **kwargs + Additional keyword arguments to be passed to the function. + + Returns + ------- + Series + + See Also + -------- + Series.skew : Return unbiased skew over requested axis. + + Examples + -------- + >>> ser = pd.Series([390., 350., 357., np.nan, 22., 20., 30.], + ... index=['Falcon', 'Falcon', 'Falcon', 'Falcon', + ... 'Parrot', 'Parrot', 'Parrot'], + ... name="Max Speed") + >>> ser + Falcon 390.0 + Falcon 350.0 + Falcon 357.0 + Falcon NaN + Parrot 22.0 + Parrot 20.0 + Parrot 30.0 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).skew() + Falcon 1.525174 + Parrot 1.457863 + Name: Max Speed, dtype: float64 + >>> ser.groupby(level=0).skew(skipna=False) + Falcon NaN + Parrot 1.457863 + Name: Max Speed, dtype: float64 + """ + if axis is lib.no_default: + axis = 0 + + if axis != 0: + result = self._op_via_apply( + "skew", + axis=axis, + skipna=skipna, + numeric_only=numeric_only, + **kwargs, + ) + return result + + def alt(obj): + # This should not be reached since the cython path should raise + # TypeError and not NotImplementedError. + raise TypeError(f"'skew' is not supported for dtype={obj.dtype}") + + return self._cython_agg_general( + "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs + ) + + @property + @doc(Series.plot.__doc__) + def plot(self) -> GroupByPlot: + result = GroupByPlot(self) + return result + + @doc(Series.nlargest.__doc__) + def nlargest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + f = partial(Series.nlargest, n=n, keep=keep) + data = self._obj_with_exclusions + # Don't change behavior if result index happens to be the same, i.e. + # already ordered and n >= all group sizes. + result = self._python_apply_general(f, data, not_indexed_same=True) + return result + + @doc(Series.nsmallest.__doc__) + def nsmallest( + self, n: int = 5, keep: Literal["first", "last", "all"] = "first" + ) -> Series: + f = partial(Series.nsmallest, n=n, keep=keep) + data = self._obj_with_exclusions + # Don't change behavior if result index happens to be the same, i.e. + # already ordered and n >= all group sizes. 
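Illustrative sketch (not part of the patch): ``SeriesGroupBy.nlargest`` and ``nsmallest`` pick the top or bottom ``n`` values within each group, and because the per-group results are not indexed like the original Series they are combined with ``not_indexed_same=True`` (the call just below), giving a (group label, original label) MultiIndex:

    import pandas as pd

    ser = pd.Series([3, 1, 4, 1, 5, 9], index=list("abcdef"))
    # two largest values within each group, keyed by (group label, original label)
    print(ser.groupby(["x", "x", "x", "y", "y", "y"]).nlargest(2))
    # x  c    4
    #    a    3
    # y  f    9
    #    e    5
    # dtype: int64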
+ result = self._python_apply_general(f, data, not_indexed_same=True) + return result + + @doc(Series.idxmin.__doc__) + def idxmin( + self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True + ) -> Series: + return self._idxmax_idxmin("idxmin", axis=axis, skipna=skipna) + + @doc(Series.idxmax.__doc__) + def idxmax( + self, axis: Axis | lib.NoDefault = lib.no_default, skipna: bool = True + ) -> Series: + return self._idxmax_idxmin("idxmax", axis=axis, skipna=skipna) + + @doc(Series.corr.__doc__) + def corr( + self, + other: Series, + method: CorrelationMethod = "pearson", + min_periods: int | None = None, + ) -> Series: + result = self._op_via_apply( + "corr", other=other, method=method, min_periods=min_periods + ) + return result + + @doc(Series.cov.__doc__) + def cov( + self, other: Series, min_periods: int | None = None, ddof: int | None = 1 + ) -> Series: + result = self._op_via_apply( + "cov", other=other, min_periods=min_periods, ddof=ddof + ) + return result + + @property + def is_monotonic_increasing(self) -> Series: + """ + Return whether each group's values are monotonically increasing. + + Returns + ------- + Series + + Examples + -------- + >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot']) + >>> s.groupby(level=0).is_monotonic_increasing + Falcon False + Parrot True + dtype: bool + """ + return self.apply(lambda ser: ser.is_monotonic_increasing) + + @property + def is_monotonic_decreasing(self) -> Series: + """ + Return whether each group's values are monotonically decreasing. + + Returns + ------- + Series + + Examples + -------- + >>> s = pd.Series([2, 1, 3, 4], index=['Falcon', 'Falcon', 'Parrot', 'Parrot']) + >>> s.groupby(level=0).is_monotonic_decreasing + Falcon True + Parrot False + dtype: bool + """ + return self.apply(lambda ser: ser.is_monotonic_decreasing) + + @doc(Series.hist.__doc__) + def hist( + self, + by=None, + ax=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + figsize: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, + ): + result = self._op_via_apply( + "hist", + by=by, + ax=ax, + grid=grid, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + figsize=figsize, + bins=bins, + backend=backend, + legend=legend, + **kwargs, + ) + return result + + @property + @doc(Series.dtype.__doc__) + def dtype(self) -> Series: + return self.apply(lambda ser: ser.dtype) + + def unique(self) -> Series: + """ + Return unique values for each group. + + It returns unique values for each of the grouped values. Returned in + order of appearance. Hash table-based unique, therefore does NOT sort. + + Returns + ------- + Series + Unique values for each of the grouped values. + + See Also + -------- + Series.unique : Return unique values of Series object. + + Examples + -------- + >>> df = pd.DataFrame([('Chihuahua', 'dog', 6.1), + ... ('Beagle', 'dog', 15.2), + ... ('Chihuahua', 'dog', 6.9), + ... ('Persian', 'cat', 9.2), + ... ('Chihuahua', 'dog', 7), + ... ('Persian', 'cat', 8.8)], + ... 
columns=['breed', 'animal', 'height_in']) + >>> df + breed animal height_in + 0 Chihuahua dog 6.1 + 1 Beagle dog 15.2 + 2 Chihuahua dog 6.9 + 3 Persian cat 9.2 + 4 Chihuahua dog 7.0 + 5 Persian cat 8.8 + >>> ser = df.groupby('animal')['breed'].unique() + >>> ser + animal + cat [Persian] + dog [Chihuahua, Beagle] + Name: breed, dtype: object + """ + result = self._op_via_apply("unique") + return result + + +class DataFrameGroupBy(GroupBy[DataFrame]): + _agg_examples_doc = dedent( + """ + Examples + -------- + >>> data = {"A": [1, 1, 2, 2], + ... "B": [1, 2, 3, 4], + ... "C": [0.362838, 0.227877, 1.267767, -0.562860]} + >>> df = pd.DataFrame(data) + >>> df + A B C + 0 1 1 0.362838 + 1 1 2 0.227877 + 2 2 3 1.267767 + 3 2 4 -0.562860 + + The aggregation is for each column. + + >>> df.groupby('A').agg('min') + B C + A + 1 1 0.227877 + 2 3 -0.562860 + + Multiple aggregations + + >>> df.groupby('A').agg(['min', 'max']) + B C + min max min max + A + 1 1 2 0.227877 0.362838 + 2 3 4 -0.562860 1.267767 + + Select a column for aggregation + + >>> df.groupby('A').B.agg(['min', 'max']) + min max + A + 1 1 2 + 2 3 4 + + User-defined function for aggregation + + >>> df.groupby('A').agg(lambda x: sum(x) + 2) + B C + A + 1 5 2.590715 + 2 9 2.704907 + + Different aggregations per column + + >>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'}) + B C + min max sum + A + 1 1 2 0.590715 + 2 3 4 0.704907 + + To control the output names with different aggregations per column, + pandas supports "named aggregation" + + >>> df.groupby("A").agg( + ... b_min=pd.NamedAgg(column="B", aggfunc="min"), + ... c_sum=pd.NamedAgg(column="C", aggfunc="sum") + ... ) + b_min c_sum + A + 1 1 0.590715 + 2 3 0.704907 + + - The keywords are the *output* column names + - The values are tuples whose first element is the column to select + and the second element is the aggregation to apply to that column. + Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields + ``['column', 'aggfunc']`` to make it clearer what the arguments are. + As usual, the aggregation can be a callable or a string alias. + + See :ref:`groupby.aggregate.named` for more. + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the aggregating function. 
+ + >>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min()) + B + A + 1 1.0 + 2 3.0 + """ + ) + + @doc(_agg_template_frame, examples=_agg_examples_doc, klass="DataFrame") + def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): + relabeling, func, columns, order = reconstruct_func(func, **kwargs) + func = maybe_mangle_lambdas(func) + + if maybe_use_numba(engine): + # Not all agg functions support numba, only propagate numba kwargs + # if user asks for numba + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + + op = GroupByApply(self, func, args=args, kwargs=kwargs) + result = op.agg() + if not is_dict_like(func) and result is not None: + # GH #52849 + if not self.as_index and is_list_like(func): + return result.reset_index() + else: + return result + elif relabeling: + # this should be the only (non-raising) case with relabeling + # used reordered index of columns + result = cast(DataFrame, result) + result = result.iloc[:, order] + result = cast(DataFrame, result) + # error: Incompatible types in assignment (expression has type + # "Optional[List[str]]", variable has type + # "Union[Union[Union[ExtensionArray, ndarray[Any, Any]], + # Index, Series], Sequence[Any]]") + result.columns = columns # type: ignore[assignment] + + if result is None: + # Remove the kwargs we inserted + # (already stored in engine, engine_kwargs arguments) + if "engine" in kwargs: + del kwargs["engine"] + del kwargs["engine_kwargs"] + # at this point func is not a str, list-like, dict-like, + # or a known callable(e.g. sum) + if maybe_use_numba(engine): + return self._aggregate_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + # grouper specific aggregations + if self._grouper.nkeys > 1: + # test_groupby_as_index_series_scalar gets here with 'not self.as_index' + return self._python_agg_general(func, *args, **kwargs) + elif args or kwargs: + # test_pass_args_kwargs gets here (with and without as_index) + # can't return early + result = self._aggregate_frame(func, *args, **kwargs) + + elif self.axis == 1: + # _aggregate_multiple_funcs does not allow self.axis == 1 + # Note: axis == 1 precludes 'not self.as_index', see __init__ + result = self._aggregate_frame(func) + return result + + else: + # try to treat as if we are passing a list + gba = GroupByApply(self, [func], args=(), kwargs={}) + try: + result = gba.agg() + + except ValueError as err: + if "No objects to concatenate" not in str(err): + raise + # _aggregate_frame can fail with e.g. func=Series.mode, + # where it expects 1D values but would be getting 2D values + # In other tests, using aggregate_frame instead of GroupByApply + # would give correct values but incorrect dtypes + # object vs float64 in test_cython_agg_empty_buckets + # float64 vs int64 in test_category_order_apply + result = self._aggregate_frame(func) + + else: + # GH#32040, GH#35246 + # e.g. test_groupby_as_index_select_column_sum_empty_df + result = cast(DataFrame, result) + result.columns = self._obj_with_exclusions.columns.copy() + + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + + return result + + agg = aggregate + + def _python_agg_general(self, func, *args, **kwargs): + orig_func = func + func = com.is_builtin_func(func) + if orig_func != func: + alias = com._builtin_table_alias[func] + warn_alias_replacement(self, orig_func, alias) + f = lambda x: func(x, *args, **kwargs) + + if self.ngroups == 0: + # e.g. 
test_evaluate_with_empty_groups different path gets different + # result dtype in empty case. + return self._python_apply_general(f, self._selected_obj, is_agg=True) + + obj = self._obj_with_exclusions + if self.axis == 1: + obj = obj.T + + if not len(obj.columns): + # e.g. test_margins_no_values_no_cols + return self._python_apply_general(f, self._selected_obj) + + output: dict[int, ArrayLike] = {} + for idx, (name, ser) in enumerate(obj.items()): + result = self._grouper.agg_series(ser, f) + output[idx] = result + + res = self.obj._constructor(output) + res.columns = obj.columns.copy(deep=False) + return self._wrap_aggregated_output(res) + + def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: + if self._grouper.nkeys != 1: + raise AssertionError("Number of keys must be 1") + + obj = self._obj_with_exclusions + + result: dict[Hashable, NDFrame | np.ndarray] = {} + for name, grp_df in self._grouper.get_iterator(obj, self.axis): + fres = func(grp_df, *args, **kwargs) + result[name] = fres + + result_index = self._grouper.result_index + other_ax = obj.axes[1 - self.axis] + out = self.obj._constructor(result, index=other_ax, columns=result_index) + if self.axis == 0: + out = out.T + + return out + + def _wrap_applied_output( + self, + data: DataFrame, + values: list, + not_indexed_same: bool = False, + is_transform: bool = False, + ): + if len(values) == 0: + if is_transform: + # GH#47787 see test_group_on_empty_multiindex + res_index = data.index + else: + res_index = self._grouper.result_index + + result = self.obj._constructor(index=res_index, columns=data.columns) + result = result.astype(data.dtypes, copy=False) + return result + + # GH12824 + # using values[0] here breaks test_groupby_apply_none_first + first_not_none = next(com.not_none(*values), None) + + if first_not_none is None: + # GH9684 - All values are None, return an empty frame. + return self.obj._constructor() + elif isinstance(first_not_none, DataFrame): + return self._concat_objects( + values, + not_indexed_same=not_indexed_same, + is_transform=is_transform, + ) + + key_index = self._grouper.result_index if self.as_index else None + + if isinstance(first_not_none, (np.ndarray, Index)): + # GH#1738: values is list of arrays of unequal lengths + # fall through to the outer else clause + # TODO: sure this is right? 
we used to do this + # after raising AttributeError above + # GH 18930 + if not is_hashable(self._selection): + # error: Need type annotation for "name" + name = tuple(self._selection) # type: ignore[var-annotated, arg-type] + else: + # error: Incompatible types in assignment + # (expression has type "Hashable", variable + # has type "Tuple[Any, ...]") + name = self._selection # type: ignore[assignment] + return self.obj._constructor_sliced(values, index=key_index, name=name) + elif not isinstance(first_not_none, Series): + # values are not series or array-like but scalars + # self._selection not passed through to Series as the + # result should not take the name of original selection + # of columns + if self.as_index: + return self.obj._constructor_sliced(values, index=key_index) + else: + result = self.obj._constructor(values, columns=[self._selection]) + result = self._insert_inaxis_grouper(result) + return result + else: + # values are Series + return self._wrap_applied_output_series( + values, + not_indexed_same, + first_not_none, + key_index, + is_transform, + ) + + def _wrap_applied_output_series( + self, + values: list[Series], + not_indexed_same: bool, + first_not_none, + key_index: Index | None, + is_transform: bool, + ) -> DataFrame | Series: + kwargs = first_not_none._construct_axes_dict() + backup = Series(**kwargs) + values = [x if (x is not None) else backup for x in values] + + all_indexed_same = all_indexes_same(x.index for x in values) + + if not all_indexed_same: + # GH 8467 + return self._concat_objects( + values, + not_indexed_same=True, + is_transform=is_transform, + ) + + # Combine values + # vstack+constructor is faster than concat and handles MI-columns + stacked_values = np.vstack([np.asarray(v) for v in values]) + + if self.axis == 0: + index = key_index + columns = first_not_none.index.copy() + if columns.name is None: + # GH6124 - propagate name of Series when it's consistent + names = {v.name for v in values} + if len(names) == 1: + columns.name = next(iter(names)) + else: + index = first_not_none.index + columns = key_index + stacked_values = stacked_values.T + + if stacked_values.dtype == object: + # We'll have the DataFrame constructor do inference + stacked_values = stacked_values.tolist() + result = self.obj._constructor(stacked_values, index=index, columns=columns) + + if not self.as_index: + result = self._insert_inaxis_grouper(result) + + return self._reindex_output(result) + + def _cython_transform( + self, + how: str, + numeric_only: bool = False, + axis: AxisInt = 0, + **kwargs, + ) -> DataFrame: + assert axis == 0 # handled by caller + + # With self.axis == 0, we have multi-block tests + # e.g. test_rank_min_int, test_cython_transform_frame + # test_transform_numeric_ret + # With self.axis == 1, _get_data_to_aggregate does a transpose + # so we always have a single block. 
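A minimal usage sketch (not part of the patch) of the kind of operation served by ``_cython_transform``: transform-style kernels such as ``cumsum`` return one value per input row, aligned to the original index, rather than one row per group:

    import pandas as pd

    df = pd.DataFrame({"key": ["a", "a", "b"], "x": [1, 2, 3]})
    # transform-style ops keep the original row index (one value per input row)
    print(df.groupby("key").cumsum())
    #    x
    # 0  1
    # 1  3
    # 2  3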
+ mgr: Manager2D = self._get_data_to_aggregate( + numeric_only=numeric_only, name=how + ) + + def arr_func(bvalues: ArrayLike) -> ArrayLike: + return self._grouper._cython_operation( + "transform", bvalues, how, 1, **kwargs + ) + + # We could use `mgr.apply` here and not have to set_axis, but + # we would have to do shape gymnastics for ArrayManager compat + res_mgr = mgr.grouped_reduce(arr_func) + res_mgr.set_axis(1, mgr.axes[1]) + + res_df = self.obj._constructor_from_mgr(res_mgr, axes=res_mgr.axes) + res_df = self._maybe_transpose_result(res_df) + return res_df + + def _transform_general(self, func, engine, engine_kwargs, *args, **kwargs): + if maybe_use_numba(engine): + return self._transform_with_numba( + func, *args, engine_kwargs=engine_kwargs, **kwargs + ) + from pandas.core.reshape.concat import concat + + applied = [] + obj = self._obj_with_exclusions + gen = self._grouper.get_iterator(obj, axis=self.axis) + fast_path, slow_path = self._define_paths(func, *args, **kwargs) + + # Determine whether to use slow or fast path by evaluating on the first group. + # Need to handle the case of an empty generator and process the result so that + # it does not need to be computed again. + try: + name, group = next(gen) + except StopIteration: + pass + else: + # 2023-02-27 No tests broken by disabling this pinning + object.__setattr__(group, "name", name) + try: + path, res = self._choose_path(fast_path, slow_path, group) + except ValueError as err: + # e.g. test_transform_with_non_scalar_group + msg = "transform must return a scalar value for each group" + raise ValueError(msg) from err + if group.size > 0: + res = _wrap_transform_general_frame(self.obj, group, res) + applied.append(res) + + # Compute and process with the remaining groups + for name, group in gen: + if group.size == 0: + continue + # 2023-02-27 No tests broken by disabling this pinning + object.__setattr__(group, "name", name) + res = path(group) + + res = _wrap_transform_general_frame(self.obj, group, res) + applied.append(res) + + concat_index = obj.columns if self.axis == 0 else obj.index + other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1 + concatenated = concat(applied, axis=self.axis, verify_integrity=False) + concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False) + return self._set_result_index_ordered(concatenated) + + __examples_dataframe_doc = dedent( + """ + >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + ... 'foo', 'bar'], + ... 'B' : ['one', 'one', 'two', 'three', + ... 'two', 'two'], + ... 'C' : [1, 5, 5, 2, 5, 5], + ... 'D' : [2.0, 5., 8., 1., 2., 9.]}) + >>> grouped = df.groupby('A')[['C', 'D']] + >>> grouped.transform(lambda x: (x - x.mean()) / x.std()) + C D + 0 -1.154701 -0.577350 + 1 0.577350 0.000000 + 2 0.577350 1.154701 + 3 -1.154701 -1.000000 + 4 0.577350 -0.577350 + 5 0.577350 1.000000 + + Broadcast result of the transformation + + >>> grouped.transform(lambda x: x.max() - x.min()) + C D + 0 4.0 6.0 + 1 3.0 8.0 + 2 4.0 6.0 + 3 3.0 8.0 + 4 4.0 6.0 + 5 3.0 8.0 + + >>> grouped.transform("mean") + C D + 0 3.666667 4.0 + 1 4.000000 5.0 + 2 3.666667 4.0 + 3 4.000000 5.0 + 4 3.666667 4.0 + 5 4.000000 5.0 + + .. 
versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + for example: + + >>> grouped.transform(lambda x: x.astype(int).max()) + C D + 0 5 8 + 1 5 9 + 2 5 8 + 3 5 9 + 4 5 8 + 5 5 9 + """ + ) + + @Substitution(klass="DataFrame", example=__examples_dataframe_doc) + @Appender(_transform_template) + def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + return self._transform( + func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs + ) + + def _define_paths(self, func, *args, **kwargs): + if isinstance(func, str): + fast_path = lambda group: getattr(group, func)(*args, **kwargs) + slow_path = lambda group: group.apply( + lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis + ) + else: + fast_path = lambda group: func(group, *args, **kwargs) + slow_path = lambda group: group.apply( + lambda x: func(x, *args, **kwargs), axis=self.axis + ) + return fast_path, slow_path + + def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame): + path = slow_path + res = slow_path(group) + + if self.ngroups == 1: + # no need to evaluate multiple paths when only + # a single group exists + return path, res + + # if we make it here, test if we can use the fast path + try: + res_fast = fast_path(group) + except AssertionError: + raise # pragma: no cover + except Exception: + # GH#29631 For user-defined function, we can't predict what may be + # raised; see test_transform.test_transform_fastpath_raises + return path, res + + # verify fast path returns either: + # a DataFrame with columns equal to group.columns + # OR a Series with index equal to group.columns + if isinstance(res_fast, DataFrame): + if not res_fast.columns.equals(group.columns): + return path, res + elif isinstance(res_fast, Series): + if not res_fast.index.equals(group.columns): + return path, res + else: + return path, res + + if res_fast.equals(res): + path = fast_path + + return path, res + + def filter(self, func, dropna: bool = True, *args, **kwargs): + """ + Filter elements from groups that don't satisfy a criterion. + + Elements from groups are filtered if they do not satisfy the + boolean criterion specified by func. + + Parameters + ---------- + func : function + Criterion to apply to each group. Should return True or False. + dropna : bool + Drop groups that do not pass the filter. True by default; if False, + groups that evaluate False are filled with NaNs. + + Returns + ------- + DataFrame + + Notes + ----- + Each subframe is endowed the attribute 'name' in case you need to know + which group you are working on. + + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', + ... 'foo', 'bar'], + ... 'B' : [1, 2, 3, 4, 5, 6], + ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) + >>> grouped = df.groupby('A') + >>> grouped.filter(lambda x: x['B'].mean() > 3.) + A B C + 1 bar 2 5.0 + 3 bar 4 1.0 + 5 bar 6 9.0 + """ + indices = [] + + obj = self._selected_obj + gen = self._grouper.get_iterator(obj, axis=self.axis) + + for name, group in gen: + # 2023-02-27 no tests are broken this pinning, but it is documented in the + # docstring above. 
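An illustrative sketch (not part of the patch) of the filtering loop that continues below: groups whose criterion evaluates to True keep all of their rows, and with ``dropna=False`` the rows of rejected groups stay in place but are masked to NaN rather than dropped:

    import pandas as pd

    df = pd.DataFrame({"A": ["foo", "foo", "bar", "bar"], "B": [1, 2, 3, 4]})
    # only group "bar" has mean(B) > 3; with dropna=False the "foo" rows
    # remain but are replaced by NaN
    print(df.groupby("A").filter(lambda g: g["B"].mean() > 3, dropna=False))
    #      A    B
    # 0  NaN  NaN
    # 1  NaN  NaN
    # 2  bar  3.0
    # 3  bar  4.0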
+ object.__setattr__(group, "name", name) + + res = func(group, *args, **kwargs) + + try: + res = res.squeeze() + except AttributeError: # allow e.g., scalars and frames to pass + pass + + # interpret the result of the filter + if is_bool(res) or (is_scalar(res) and isna(res)): + if notna(res) and res: + indices.append(self._get_index(name)) + else: + # non scalars aren't allowed + raise TypeError( + f"filter function returned a {type(res).__name__}, " + "but expected a scalar bool" + ) + + return self._apply_filter(indices, dropna) + + def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy: + if self.axis == 1: + # GH 37725 + raise ValueError("Cannot subset columns when using axis=1") + # per GH 23566 + if isinstance(key, tuple) and len(key) > 1: + # if len == 1, then it becomes a SeriesGroupBy and this is actually + # valid syntax, so don't raise + raise ValueError( + "Cannot subset columns with a tuple with more than one element. " + "Use a list instead." + ) + return super().__getitem__(key) + + def _gotitem(self, key, ndim: int, subset=None): + """ + sub-classes to define + return a sliced object + + Parameters + ---------- + key : string / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + if ndim == 2: + if subset is None: + subset = self.obj + return DataFrameGroupBy( + subset, + self.keys, + axis=self.axis, + level=self.level, + grouper=self._grouper, + exclusions=self.exclusions, + selection=key, + as_index=self.as_index, + sort=self.sort, + group_keys=self.group_keys, + observed=self.observed, + dropna=self.dropna, + ) + elif ndim == 1: + if subset is None: + subset = self.obj[key] + return SeriesGroupBy( + subset, + self.keys, + level=self.level, + grouper=self._grouper, + exclusions=self.exclusions, + selection=key, + as_index=self.as_index, + sort=self.sort, + group_keys=self.group_keys, + observed=self.observed, + dropna=self.dropna, + ) + + raise AssertionError("invalid ndim for _gotitem") + + def _get_data_to_aggregate( + self, *, numeric_only: bool = False, name: str | None = None + ) -> Manager2D: + obj = self._obj_with_exclusions + if self.axis == 1: + mgr = obj.T._mgr + else: + mgr = obj._mgr + + if numeric_only: + mgr = mgr.get_numeric_data() + return mgr + + def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame: + return self.obj._constructor_from_mgr(mgr, axes=mgr.axes) + + def _apply_to_column_groupbys(self, func) -> DataFrame: + from pandas.core.reshape.concat import concat + + obj = self._obj_with_exclusions + columns = obj.columns + sgbs = [ + SeriesGroupBy( + obj.iloc[:, i], + selection=colname, + grouper=self._grouper, + exclusions=self.exclusions, + observed=self.observed, + ) + for i, colname in enumerate(obj.columns) + ] + results = [func(sgb) for sgb in sgbs] + + if not len(results): + # concat would raise + res_df = DataFrame([], columns=columns, index=self._grouper.result_index) + else: + res_df = concat(results, keys=columns, axis=1) + + if not self.as_index: + res_df.index = default_index(len(res_df)) + res_df = self._insert_inaxis_grouper(res_df) + return res_df + + def nunique(self, dropna: bool = True) -> DataFrame: + """ + Return DataFrame with counts of unique elements in each position. + + Parameters + ---------- + dropna : bool, default True + Don't include NaN in the counts. + + Returns + ------- + nunique: DataFrame + + Examples + -------- + >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', + ... 'ham', 'ham'], + ... 'value1': [1, 5, 5, 2, 5, 5], + ... 
'value2': list('abbaxy')}) + >>> df + id value1 value2 + 0 spam 1 a + 1 egg 5 b + 2 egg 5 b + 3 spam 2 a + 4 ham 5 x + 5 ham 5 y + + >>> df.groupby('id').nunique() + value1 value2 + id + egg 1 1 + ham 1 2 + spam 2 1 + + Check for rows with the same id but conflicting values: + + >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) + id value1 value2 + 0 spam 1 a + 3 spam 2 a + 4 ham 5 x + 5 ham 5 y + """ + + if self.axis != 0: + # see test_groupby_crash_on_nunique + return self._python_apply_general( + lambda sgb: sgb.nunique(dropna), self._obj_with_exclusions, is_agg=True + ) + + return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna)) + + def idxmax( + self, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + ) -> DataFrame: + """ + Return index of first occurrence of maximum over requested axis. + + NA/null values are excluded. + + Parameters + ---------- + axis : {{0 or 'index', 1 or 'columns'}}, default None + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + If axis is not provided, grouper's axis is used. + + .. versionchanged:: 2.0.0 + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + Returns + ------- + Series + Indexes of maxima along the specified axis. + + Raises + ------ + ValueError + * If the row/column is empty + + See Also + -------- + Series.idxmax : Return index of the maximum element. + + Notes + ----- + This method is the DataFrame version of ``ndarray.argmax``. + + Examples + -------- + Consider a dataset containing food consumption in Argentina. + + >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], + ... 'co2_emissions': [37.2, 19.66, 1712]}, + ... index=['Pork', 'Wheat Products', 'Beef']) + + >>> df + consumption co2_emissions + Pork 10.51 37.20 + Wheat Products 103.11 19.66 + Beef 55.48 1712.00 + + By default, it returns the index for the maximum value in each column. + + >>> df.idxmax() + consumption Wheat Products + co2_emissions Beef + dtype: object + + To return the index for the maximum value in each row, use ``axis="columns"``. + + >>> df.idxmax(axis="columns") + Pork co2_emissions + Wheat Products consumption + Beef co2_emissions + dtype: object + """ + return self._idxmax_idxmin( + "idxmax", axis=axis, numeric_only=numeric_only, skipna=skipna + ) + + def idxmin( + self, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + ) -> DataFrame: + """ + Return index of first occurrence of minimum over requested axis. + + NA/null values are excluded. + + Parameters + ---------- + axis : {{0 or 'index', 1 or 'columns'}}, default None + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + If axis is not provided, grouper's axis is used. + + .. versionchanged:: 2.0.0 + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. 
versionadded:: 1.5.0 + + Returns + ------- + Series + Indexes of minima along the specified axis. + + Raises + ------ + ValueError + * If the row/column is empty + + See Also + -------- + Series.idxmin : Return index of the minimum element. + + Notes + ----- + This method is the DataFrame version of ``ndarray.argmin``. + + Examples + -------- + Consider a dataset containing food consumption in Argentina. + + >>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48], + ... 'co2_emissions': [37.2, 19.66, 1712]}, + ... index=['Pork', 'Wheat Products', 'Beef']) + + >>> df + consumption co2_emissions + Pork 10.51 37.20 + Wheat Products 103.11 19.66 + Beef 55.48 1712.00 + + By default, it returns the index for the minimum value in each column. + + >>> df.idxmin() + consumption Pork + co2_emissions Wheat Products + dtype: object + + To return the index for the minimum value in each row, use ``axis="columns"``. + + >>> df.idxmin(axis="columns") + Pork consumption + Wheat Products co2_emissions + Beef consumption + dtype: object + """ + return self._idxmax_idxmin( + "idxmin", axis=axis, numeric_only=numeric_only, skipna=skipna + ) + + boxplot = boxplot_frame_groupby + + def value_counts( + self, + subset: Sequence[Hashable] | None = None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + dropna: bool = True, + ) -> DataFrame | Series: + """ + Return a Series or DataFrame containing counts of unique rows. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + subset : list-like, optional + Columns to use when counting unique combinations. + normalize : bool, default False + Return proportions rather than frequencies. + sort : bool, default True + Sort by frequencies. + ascending : bool, default False + Sort in ascending order. + dropna : bool, default True + Don't include counts of rows that contain NA values. + + Returns + ------- + Series or DataFrame + Series if the groupby as_index is True, otherwise DataFrame. + + See Also + -------- + Series.value_counts: Equivalent method on Series. + DataFrame.value_counts: Equivalent method on DataFrame. + SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy. + + Notes + ----- + - If the groupby as_index is True then the returned Series will have a + MultiIndex with one level per input column. + - If the groupby as_index is False then the returned DataFrame will have an + additional column with the value_counts. The column is labelled 'count' or + 'proportion', depending on the ``normalize`` parameter. + + By default, rows that contain any NA values are omitted from + the result. + + By default, the result will be in descending order so that the + first element of each group is the most frequently-occurring row. + + Examples + -------- + >>> df = pd.DataFrame({ + ... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'], + ... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'], + ... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR'] + ... 
}) + + >>> df + gender education country + 0 male low US + 1 male medium FR + 2 female high US + 3 male low FR + 4 female high FR + 5 male low FR + + >>> df.groupby('gender').value_counts() + gender education country + female high FR 1 + US 1 + male low FR 2 + US 1 + medium FR 1 + Name: count, dtype: int64 + + >>> df.groupby('gender').value_counts(ascending=True) + gender education country + female high FR 1 + US 1 + male low US 1 + medium FR 1 + low FR 2 + Name: count, dtype: int64 + + >>> df.groupby('gender').value_counts(normalize=True) + gender education country + female high FR 0.50 + US 0.50 + male low FR 0.50 + US 0.25 + medium FR 0.25 + Name: proportion, dtype: float64 + + >>> df.groupby('gender', as_index=False).value_counts() + gender education country count + 0 female high FR 1 + 1 female high US 1 + 2 male low FR 2 + 3 male low US 1 + 4 male medium FR 1 + + >>> df.groupby('gender', as_index=False).value_counts(normalize=True) + gender education country proportion + 0 female high FR 0.50 + 1 female high US 0.50 + 2 male low FR 0.50 + 3 male low US 0.25 + 4 male medium FR 0.25 + """ + return self._value_counts(subset, normalize, sort, ascending, dropna) + + def fillna( + self, + value: Hashable | Mapping | Series | DataFrame | None = None, + method: FillnaOptions | None = None, + axis: Axis | None | lib.NoDefault = lib.no_default, + inplace: bool = False, + limit: int | None = None, + downcast=lib.no_default, + ) -> DataFrame | None: + """ + Fill NA/NaN values using the specified method within groups. + + .. deprecated:: 2.2.0 + This method is deprecated and will be removed in a future version. + Use the :meth:`.DataFrameGroupBy.ffill` or :meth:`.DataFrameGroupBy.bfill` + for forward or backward filling instead. If you want to fill with a + single value, use :meth:`DataFrame.fillna` instead. + + Parameters + ---------- + value : scalar, dict, Series, or DataFrame + Value to use to fill holes (e.g. 0), alternately a + dict/Series/DataFrame of values specifying which value to use for + each index (for a Series) or column (for a DataFrame). Values not + in the dict/Series/DataFrame will not be filled. This value cannot + be a list. Users wanting to use the ``value`` argument and not ``method`` + should prefer :meth:`.DataFrame.fillna` as this + will produce the same result and be more performant. + method : {{'bfill', 'ffill', None}}, default None + Method to use for filling holes. ``'ffill'`` will propagate + the last valid observation forward within a group. + ``'bfill'`` will use next valid observation to fill the gap. + axis : {0 or 'index', 1 or 'columns'} + Axis along which to fill missing values. When the :class:`DataFrameGroupBy` + ``axis`` argument is ``0``, using ``axis=1`` here will produce + the same results as :meth:`.DataFrame.fillna`. When the + :class:`DataFrameGroupBy` ``axis`` argument is ``1``, using ``axis=0`` + or ``axis=1`` here will produce the same results. + inplace : bool, default False + Broken. Do not set to True. + limit : int, default None + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill within a group. In other words, + if there is a gap with more than this number of consecutive NaNs, + it will only be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. Must be greater than 0 if not None. 
+ downcast : dict, default is None + A dict of item->dtype of what to downcast if possible, + or the string 'infer' which will try to downcast to an appropriate + equal type (e.g. float64 to int64 if possible). + + Returns + ------- + DataFrame + Object with missing values filled. + + See Also + -------- + ffill : Forward fill values within a group. + bfill : Backward fill values within a group. + + Examples + -------- + >>> df = pd.DataFrame( + ... { + ... "key": [0, 0, 1, 1, 1], + ... "A": [np.nan, 2, np.nan, 3, np.nan], + ... "B": [2, 3, np.nan, np.nan, np.nan], + ... "C": [np.nan, np.nan, 2, np.nan, np.nan], + ... } + ... ) + >>> df + key A B C + 0 0 NaN 2.0 NaN + 1 0 2.0 3.0 NaN + 2 1 NaN NaN 2.0 + 3 1 3.0 NaN NaN + 4 1 NaN NaN NaN + + Propagate non-null values forward or backward within each group along columns. + + >>> df.groupby("key").fillna(method="ffill") + A B C + 0 NaN 2.0 NaN + 1 2.0 3.0 NaN + 2 NaN NaN 2.0 + 3 3.0 NaN 2.0 + 4 3.0 NaN 2.0 + + >>> df.groupby("key").fillna(method="bfill") + A B C + 0 2.0 2.0 NaN + 1 2.0 3.0 NaN + 2 3.0 NaN 2.0 + 3 3.0 NaN NaN + 4 NaN NaN NaN + + Propagate non-null values forward or backward within each group along rows. + + >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="ffill").T + key A B C + 0 0.0 0.0 2.0 2.0 + 1 0.0 2.0 3.0 3.0 + 2 1.0 1.0 NaN 2.0 + 3 1.0 3.0 NaN NaN + 4 1.0 1.0 NaN NaN + + >>> df.T.groupby(np.array([0, 0, 1, 1])).fillna(method="bfill").T + key A B C + 0 0.0 NaN 2.0 NaN + 1 0.0 2.0 3.0 NaN + 2 1.0 NaN 2.0 2.0 + 3 1.0 3.0 NaN NaN + 4 1.0 NaN NaN NaN + + Only replace the first NaN element within a group along rows. + + >>> df.groupby("key").fillna(method="ffill", limit=1) + A B C + 0 NaN 2.0 NaN + 1 2.0 3.0 NaN + 2 NaN NaN 2.0 + 3 3.0 NaN 2.0 + 4 3.0 NaN NaN + """ + warnings.warn( + f"{type(self).__name__}.fillna is deprecated and " + "will be removed in a future version. Use obj.ffill() or obj.bfill() " + "for forward or backward filling instead. If you want to fill with a " + f"single value, use {type(self.obj).__name__}.fillna instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + + result = self._op_via_apply( + "fillna", + value=value, + method=method, + axis=axis, + inplace=inplace, + limit=limit, + downcast=downcast, + ) + return result + + def take( + self, + indices: TakeIndexer, + axis: Axis | None | lib.NoDefault = lib.no_default, + **kwargs, + ) -> DataFrame: + """ + Return the elements in the given *positional* indices in each group. + + This means that we are not indexing according to actual values in + the index attribute of the object. We are indexing according to the + actual position of the element in the object. + + If a requested index does not exist for some group, this method will raise. + To get similar behavior that ignores indices that don't exist, see + :meth:`.DataFrameGroupBy.nth`. + + Parameters + ---------- + indices : array-like + An array of ints indicating which positions to take. + axis : {0 or 'index', 1 or 'columns', None}, default 0 + The axis on which to select elements. ``0`` means that we are + selecting rows, ``1`` means that we are selecting columns. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + **kwargs + For compatibility with :meth:`numpy.take`. Has no effect on the + output. + + Returns + ------- + DataFrame + An DataFrame containing the elements taken from each group. + + See Also + -------- + DataFrame.take : Take elements from a Series along an axis. 
+ DataFrame.loc : Select a subset of a DataFrame by labels. + DataFrame.iloc : Select a subset of a DataFrame by positions. + numpy.take : Take elements from an array along an axis. + + Examples + -------- + >>> df = pd.DataFrame([('falcon', 'bird', 389.0), + ... ('parrot', 'bird', 24.0), + ... ('lion', 'mammal', 80.5), + ... ('monkey', 'mammal', np.nan), + ... ('rabbit', 'mammal', 15.0)], + ... columns=['name', 'class', 'max_speed'], + ... index=[4, 3, 2, 1, 0]) + >>> df + name class max_speed + 4 falcon bird 389.0 + 3 parrot bird 24.0 + 2 lion mammal 80.5 + 1 monkey mammal NaN + 0 rabbit mammal 15.0 + >>> gb = df.groupby([1, 1, 2, 2, 2]) + + Take elements at positions 0 and 1 along the axis 0 (default). + + Note how the indices selected in the result do not correspond to + our input indices 0 and 1. That's because we are selecting the 0th + and 1st rows, not rows whose indices equal 0 and 1. + + >>> gb.take([0, 1]) + name class max_speed + 1 4 falcon bird 389.0 + 3 parrot bird 24.0 + 2 2 lion mammal 80.5 + 1 monkey mammal NaN + + The order of the specified indices influences the order in the result. + Here, the order is swapped from the previous example. + + >>> gb.take([1, 0]) + name class max_speed + 1 3 parrot bird 24.0 + 4 falcon bird 389.0 + 2 1 monkey mammal NaN + 2 lion mammal 80.5 + + Take elements at indices 1 and 2 along the axis 1 (column selection). + + We may take elements using negative integers for positive indices, + starting from the end of the object, just like with Python lists. + + >>> gb.take([-1, -2]) + name class max_speed + 1 3 parrot bird 24.0 + 4 falcon bird 389.0 + 2 0 rabbit mammal 15.0 + 1 monkey mammal NaN + """ + result = self._op_via_apply("take", indices=indices, axis=axis, **kwargs) + return result + + def skew( + self, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + **kwargs, + ) -> DataFrame: + """ + Return unbiased skew within groups. + + Normalized by N-1. + + Parameters + ---------- + axis : {0 or 'index', 1 or 'columns', None}, default 0 + Axis for the function to be applied on. + + Specifying ``axis=None`` will apply the aggregation across both axes. + + .. versionadded:: 2.0.0 + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + skipna : bool, default True + Exclude NA/null values when computing the result. + + numeric_only : bool, default False + Include only float, int, boolean columns. + + **kwargs + Additional keyword arguments to be passed to the function. + + Returns + ------- + DataFrame + + See Also + -------- + DataFrame.skew : Return unbiased skew over requested axis. + + Examples + -------- + >>> arrays = [['falcon', 'parrot', 'cockatoo', 'kiwi', + ... 'lion', 'monkey', 'rabbit'], + ... ['bird', 'bird', 'bird', 'bird', + ... 'mammal', 'mammal', 'mammal']] + >>> index = pd.MultiIndex.from_arrays(arrays, names=('name', 'class')) + >>> df = pd.DataFrame({'max_speed': [389.0, 24.0, 70.0, np.nan, + ... 80.5, 21.5, 15.0]}, + ... 
index=index) + >>> df + max_speed + name class + falcon bird 389.0 + parrot bird 24.0 + cockatoo bird 70.0 + kiwi bird NaN + lion mammal 80.5 + monkey mammal 21.5 + rabbit mammal 15.0 + >>> gb = df.groupby(["class"]) + >>> gb.skew() + max_speed + class + bird 1.628296 + mammal 1.669046 + >>> gb.skew(skipna=False) + max_speed + class + bird NaN + mammal 1.669046 + """ + if axis is lib.no_default: + axis = 0 + + if axis != 0: + result = self._op_via_apply( + "skew", + axis=axis, + skipna=skipna, + numeric_only=numeric_only, + **kwargs, + ) + return result + + def alt(obj): + # This should not be reached since the cython path should raise + # TypeError and not NotImplementedError. + raise TypeError(f"'skew' is not supported for dtype={obj.dtype}") + + return self._cython_agg_general( + "skew", alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs + ) + + @property + @doc(DataFrame.plot.__doc__) + def plot(self) -> GroupByPlot: + result = GroupByPlot(self) + return result + + @doc(DataFrame.corr.__doc__) + def corr( + self, + method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson", + min_periods: int = 1, + numeric_only: bool = False, + ) -> DataFrame: + result = self._op_via_apply( + "corr", method=method, min_periods=min_periods, numeric_only=numeric_only + ) + return result + + @doc(DataFrame.cov.__doc__) + def cov( + self, + min_periods: int | None = None, + ddof: int | None = 1, + numeric_only: bool = False, + ) -> DataFrame: + result = self._op_via_apply( + "cov", min_periods=min_periods, ddof=ddof, numeric_only=numeric_only + ) + return result + + @doc(DataFrame.hist.__doc__) + def hist( + self, + column: IndexLabel | None = None, + by=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + ax=None, + sharex: bool = False, + sharey: bool = False, + figsize: tuple[int, int] | None = None, + layout: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, + ): + result = self._op_via_apply( + "hist", + column=column, + by=by, + grid=grid, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + ax=ax, + sharex=sharex, + sharey=sharey, + figsize=figsize, + layout=layout, + bins=bins, + backend=backend, + legend=legend, + **kwargs, + ) + return result + + @property + @doc(DataFrame.dtypes.__doc__) + def dtypes(self) -> Series: + # GH#51045 + warnings.warn( + f"{type(self).__name__}.dtypes is deprecated and will be removed in " + "a future version. 
Check the dtypes on the base object instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + + # error: Incompatible return value type (got "DataFrame", expected "Series") + return self._python_apply_general( # type: ignore[return-value] + lambda df: df.dtypes, self._selected_obj + ) + + @doc(DataFrame.corrwith.__doc__) + def corrwith( + self, + other: DataFrame | Series, + axis: Axis | lib.NoDefault = lib.no_default, + drop: bool = False, + method: CorrelationMethod = "pearson", + numeric_only: bool = False, + ) -> DataFrame: + result = self._op_via_apply( + "corrwith", + other=other, + axis=axis, + drop=drop, + method=method, + numeric_only=numeric_only, + ) + return result + + +def _wrap_transform_general_frame( + obj: DataFrame, group: DataFrame, res: DataFrame | Series +) -> DataFrame: + from pandas import concat + + if isinstance(res, Series): + # we need to broadcast across the + # other dimension; this will preserve dtypes + # GH14457 + if res.index.is_(obj.index): + res_frame = concat([res] * len(group.columns), axis=1) + res_frame.columns = group.columns + res_frame.index = group.index + else: + res_frame = obj._constructor( + np.tile(res.values, (len(group.index), 1)), + columns=group.columns, + index=group.index, + ) + assert isinstance(res_frame, DataFrame) + return res_frame + elif isinstance(res, DataFrame) and not res.index.is_(group.index): + return res._align_frame(group)[0] + else: + return res diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/groupby.py b/venv/lib/python3.10/site-packages/pandas/core/groupby/groupby.py new file mode 100644 index 0000000000000000000000000000000000000000..db8949788567b76bd8df0580d19dcc3e5069e923 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/groupby/groupby.py @@ -0,0 +1,5997 @@ +""" +Provide the groupby split-apply-combine paradigm. Define the GroupBy +class providing the base-class of operations. + +The SeriesGroupBy and DataFrameGroupBy sub-class +(defined in pandas.core.groupby.generic) +expose these user-facing objects to provide specific functionality. 
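A minimal sketch of the split-apply-combine flow this module provides, shown through the public API (the frame and column names below are illustrative only):

>>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
>>> df.groupby("key")["val"].sum()  # split by "key", apply sum, combine
key
a    3
b    3
Name: val, dtype: int64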
+""" +from __future__ import annotations + +from collections.abc import ( + Hashable, + Iterator, + Mapping, + Sequence, +) +import datetime +from functools import ( + partial, + wraps, +) +import inspect +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Callable, + Literal, + TypeVar, + Union, + cast, + final, +) +import warnings + +import numpy as np + +from pandas._config.config import option_context + +from pandas._libs import ( + Timestamp, + lib, +) +from pandas._libs.algos import rank_1d +import pandas._libs.groupby as libgroupby +from pandas._libs.missing import NA +from pandas._typing import ( + AnyArrayLike, + ArrayLike, + Axis, + AxisInt, + DtypeObj, + FillnaOptions, + IndexLabel, + NDFrameT, + PositionalIndexer, + RandomState, + Scalar, + T, + npt, +) +from pandas.compat.numpy import function as nv +from pandas.errors import ( + AbstractMethodError, + DataError, +) +from pandas.util._decorators import ( + Appender, + Substitution, + cache_readonly, + doc, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + coerce_indexer_dtype, + ensure_dtype_can_hold_na, +) +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_float_dtype, + is_hashable, + is_integer, + is_integer_dtype, + is_list_like, + is_numeric_dtype, + is_object_dtype, + is_scalar, + needs_i8_conversion, + pandas_dtype, +) +from pandas.core.dtypes.missing import ( + isna, + na_value_for_dtype, + notna, +) + +from pandas.core import ( + algorithms, + sample, +) +from pandas.core._numba import executor +from pandas.core.apply import warn_alias_replacement +from pandas.core.arrays import ( + ArrowExtensionArray, + BaseMaskedArray, + Categorical, + ExtensionArray, + FloatingArray, + IntegerArray, + SparseArray, +) +from pandas.core.arrays.string_ import StringDtype +from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringArrayNumpySemantics, +) +from pandas.core.base import ( + PandasObject, + SelectionMixin, +) +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.core.generic import NDFrame +from pandas.core.groupby import ( + base, + numba_, + ops, +) +from pandas.core.groupby.grouper import get_grouper +from pandas.core.groupby.indexing import ( + GroupByIndexingMixin, + GroupByNthSelector, +) +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, + MultiIndex, + RangeIndex, + default_index, +) +from pandas.core.internals.blocks import ensure_block_shape +from pandas.core.series import Series +from pandas.core.sorting import get_group_index_sorter +from pandas.core.util.numba_ import ( + get_jit_arguments, + maybe_use_numba, +) + +if TYPE_CHECKING: + from typing import Any + + from pandas.core.resample import Resampler + from pandas.core.window import ( + ExpandingGroupby, + ExponentialMovingWindowGroupby, + RollingGroupby, + ) + +_common_see_also = """ + See Also + -------- + Series.%(name)s : Apply a function %(name)s to a Series. + DataFrame.%(name)s : Apply a function %(name)s + to each row or column of a DataFrame. +""" + +_apply_docs = { + "template": """ + Apply function ``func`` group-wise and combine the results together. + + The function passed to ``apply`` must take a {input} as its first + argument and return a DataFrame, Series or scalar. ``apply`` will + then take care of combining the results back together into a single + dataframe or series. ``apply`` is therefore a highly flexible + grouping method. 
+ + While ``apply`` is a very flexible method, its downside is that + using it can be quite a bit slower than using more specific methods + like ``agg`` or ``transform``. Pandas offers a wide range of method that will + be much faster than using ``apply`` for their specific purposes, so try to + use them before reaching for ``apply``. + + Parameters + ---------- + func : callable + A callable that takes a {input} as its first argument, and + returns a dataframe, a series or a scalar. In addition the + callable may take positional and keyword arguments. + include_groups : bool, default True + When True, will attempt to apply ``func`` to the groupings in + the case that they are columns of the DataFrame. If this raises a + TypeError, the result will be computed with the groupings excluded. + When False, the groupings will be excluded when applying ``func``. + + .. versionadded:: 2.2.0 + + .. deprecated:: 2.2.0 + + Setting include_groups to True is deprecated. Only the value + False will be allowed in a future version of pandas. + + args, kwargs : tuple and dict + Optional positional and keyword arguments to pass to ``func``. + + Returns + ------- + Series or DataFrame + + See Also + -------- + pipe : Apply function to the full GroupBy object instead of to each + group. + aggregate : Apply aggregate function to the GroupBy object. + transform : Apply function column-by-column to the GroupBy object. + Series.apply : Apply a function to a Series. + DataFrame.apply : Apply a function to each row or column of a DataFrame. + + Notes + ----- + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. + + Functions that mutate the passed object can produce unexpected + behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` + for more details. + + Examples + -------- + {examples} + """, + "dataframe_examples": """ + >>> df = pd.DataFrame({'A': 'a a b'.split(), + ... 'B': [1, 2, 3], + ... 'C': [4, 6, 5]}) + >>> g1 = df.groupby('A', group_keys=False) + >>> g2 = df.groupby('A', group_keys=True) + + Notice that ``g1`` and ``g2`` have two groups, ``a`` and ``b``, and only + differ in their ``group_keys`` argument. Calling `apply` in various ways, + we can get different grouping results: + + Example 1: below the function passed to `apply` takes a DataFrame as + its argument and returns a DataFrame. `apply` combines the result for + each group together into a new DataFrame: + + >>> g1[['B', 'C']].apply(lambda x: x / x.sum()) + B C + 0 0.333333 0.4 + 1 0.666667 0.6 + 2 1.000000 1.0 + + In the above, the groups are not part of the index. We can have them included + by using ``g2`` where ``group_keys=True``: + + >>> g2[['B', 'C']].apply(lambda x: x / x.sum()) + B C + A + a 0 0.333333 0.4 + 1 0.666667 0.6 + b 2 1.000000 1.0 + + Example 2: The function passed to `apply` takes a DataFrame as + its argument and returns a Series. `apply` combines the result for + each group together into a new DataFrame. + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``. + + >>> g1[['B', 'C']].apply(lambda x: x.astype(float).max() - x.min()) + B C + A + a 1.0 2.0 + b 0.0 0.0 + + >>> g2[['B', 'C']].apply(lambda x: x.astype(float).max() - x.min()) + B C + A + a 1.0 2.0 + b 0.0 0.0 + + The ``group_keys`` argument has no effect here because the result is not + like-indexed (i.e. :ref:`a transform `) when compared + to the input. 
+ + Example 3: The function passed to `apply` takes a DataFrame as + its argument and returns a scalar. `apply` combines the result for + each group together into a Series, including setting the index as + appropriate: + + >>> g1.apply(lambda x: x.C.max() - x.B.min(), include_groups=False) + A + a 5 + b 2 + dtype: int64""", + "series_examples": """ + >>> s = pd.Series([0, 1, 2], index='a a b'.split()) + >>> g1 = s.groupby(s.index, group_keys=False) + >>> g2 = s.groupby(s.index, group_keys=True) + + From ``s`` above we can see that ``g`` has two groups, ``a`` and ``b``. + Notice that ``g1`` have ``g2`` have two groups, ``a`` and ``b``, and only + differ in their ``group_keys`` argument. Calling `apply` in various ways, + we can get different grouping results: + + Example 1: The function passed to `apply` takes a Series as + its argument and returns a Series. `apply` combines the result for + each group together into a new Series. + + .. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``. + + >>> g1.apply(lambda x: x * 2 if x.name == 'a' else x / 2) + a 0.0 + a 2.0 + b 1.0 + dtype: float64 + + In the above, the groups are not part of the index. We can have them included + by using ``g2`` where ``group_keys=True``: + + >>> g2.apply(lambda x: x * 2 if x.name == 'a' else x / 2) + a a 0.0 + a 2.0 + b b 1.0 + dtype: float64 + + Example 2: The function passed to `apply` takes a Series as + its argument and returns a scalar. `apply` combines the result for + each group together into a Series, including setting the index as + appropriate: + + >>> g1.apply(lambda x: x.max() - x.min()) + a 1 + b 0 + dtype: int64 + + The ``group_keys`` argument has no effect here because the result is not + like-indexed (i.e. :ref:`a transform `) when compared + to the input. + + >>> g2.apply(lambda x: x.max() - x.min()) + a 1 + b 0 + dtype: int64""", +} + +_groupby_agg_method_template = """ +Compute {fname} of group values. + +Parameters +---------- +numeric_only : bool, default {no} + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None``. + +min_count : int, default {mc} + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + +Returns +------- +Series or DataFrame + Computed {fname} of values within each group. + +Examples +-------- +{example} +""" + +_groupby_agg_method_engine_template = """ +Compute {fname} of group values. + +Parameters +---------- +numeric_only : bool, default {no} + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None``. + +min_count : int, default {mc} + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + +engine : str, default None {e} + * ``'cython'`` : Runs rolling apply through C-extensions from cython. + * ``'numba'`` : Runs rolling apply through JIT compiled code from numba. + Only available when ``raw`` is set to ``True``. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + +engine_kwargs : dict, default None {ek} + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. 
The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to both the ``func`` and the ``apply`` groupby aggregation. + +Returns +------- +Series or DataFrame + Computed {fname} of values within each group. + +Examples +-------- +{example} +""" + +_pipe_template = """ +Apply a ``func`` with arguments to this %(klass)s object and return its result. + +Use `.pipe` when you want to improve readability by chaining together +functions that expect Series, DataFrames, GroupBy or Resampler objects. +Instead of writing + +>>> h = lambda x, arg2, arg3: x + 1 - arg2 * arg3 +>>> g = lambda x, arg1: x * 5 / arg1 +>>> f = lambda x: x ** 4 +>>> df = pd.DataFrame([["a", 4], ["b", 5]], columns=["group", "value"]) +>>> h(g(f(df.groupby('group')), arg1=1), arg2=2, arg3=3) # doctest: +SKIP + +You can write + +>>> (df.groupby('group') +... .pipe(f) +... .pipe(g, arg1=1) +... .pipe(h, arg2=2, arg3=3)) # doctest: +SKIP + +which is much more readable. + +Parameters +---------- +func : callable or tuple of (callable, str) + Function to apply to this %(klass)s object or, alternatively, + a `(callable, data_keyword)` tuple where `data_keyword` is a + string indicating the keyword of `callable` that expects the + %(klass)s object. +args : iterable, optional + Positional arguments passed into `func`. +kwargs : dict, optional + A dictionary of keyword arguments passed into `func`. + +Returns +------- +the return type of `func`. + +See Also +-------- +Series.pipe : Apply a function with arguments to a series. +DataFrame.pipe: Apply a function with arguments to a dataframe. +apply : Apply function to each group instead of to the + full %(klass)s object. + +Notes +----- +See more `here +`_ + +Examples +-------- +%(examples)s +""" + +_transform_template = """ +Call function producing a same-indexed %(klass)s on each group. + +Returns a %(klass)s having the same indexes as the original object +filled with the transformed values. + +Parameters +---------- +f : function, str + Function to apply to each group. See the Notes section below for requirements. + + Accepted inputs are: + + - String + - Python function + - Numba JIT function with ``engine='numba'`` specified. + + Only passing a single function is supported with this engine. + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + + If a string is chosen, then it needs to be the name + of the groupby method you want to use. +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or the global setting ``compute.use_numba`` + +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be + applied to the function + +**kwargs + Keyword arguments to be passed into func. 
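As a sketch of the ``engine='numba'`` contract described above, a user-defined function taking ``values`` and ``index`` as its first two arguments (the data and the ``group_demean`` helper are assumptions for illustration, and numba must be installed):

>>> def group_demean(values, index):
...     # values/index are the group's data and index, per the contract above
...     return values - values.mean()
>>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 3.0, 5.0]})
>>> df.groupby("key")["val"].transform(group_demean, engine="numba")  # doctest: +SKIP
0   -1.0
1    1.0
2    0.0
Name: val, dtype: float64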
+ +Returns +------- +%(klass)s + +See Also +-------- +%(klass)s.groupby.apply : Apply function ``func`` group-wise and combine + the results together. +%(klass)s.groupby.aggregate : Aggregate using one or more + operations over the specified axis. +%(klass)s.transform : Call ``func`` on self producing a %(klass)s with the + same axis shape as self. + +Notes +----- +Each group is endowed the attribute 'name' in case you need to know +which group you are working on. + +The current implementation imposes three requirements on f: + +* f must return a value that either has the same shape as the input + subframe or can be broadcast to the shape of the input subframe. + For example, if `f` returns a scalar it will be broadcast to have the + same shape as the input subframe. +* if this is a DataFrame, f must support application column-by-column + in the subframe. If f also supports application to the entire subframe, + then a fast path is used starting from the second chunk. +* f must not mutate groups. Mutation is not supported and may + produce unexpected results. See :ref:`gotchas.udf-mutation` for more details. + +When using ``engine='numba'``, there will be no "fall back" behavior internally. +The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. + +.. versionchanged:: 2.0.0 + + When using ``.transform`` on a grouped DataFrame and the transformation function + returns a DataFrame, pandas now aligns the result's index + with the input's index. You can call ``.to_numpy()`` on the + result of the transformation function to avoid alignment. + +Examples +-------- +%(example)s""" + +_agg_template_series = """ +Aggregate using one or more operations over the specified axis. + +Parameters +---------- +func : function, str, list, dict or None + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - None, in which case ``**kwargs`` are used with Named Aggregation. Here the + output has one column for each element in ``**kwargs``. The name of the + column is keyword, whereas the value determines the aggregation used to compute + the values in the column. + + Can also accept a Numba JIT function with + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. + + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + + .. deprecated:: 2.1.0 + + Passing a dictionary is deprecated and will raise in a future version + of pandas. Pass a list of aggregations instead. +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. 
+ * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to the function + +**kwargs + * If ``func`` is None, ``**kwargs`` are used to define the output names and + aggregations via Named Aggregation. See ``func`` entry. + * Otherwise, keyword arguments to be passed into func. + +Returns +------- +{klass} + +See Also +-------- +{klass}.groupby.apply : Apply function func group-wise + and combine the results together. +{klass}.groupby.transform : Transforms the Series on each group + based on the given function. +{klass}.aggregate : Aggregate using one or more + operations over the specified axis. + +Notes +----- +When using ``engine='numba'``, there will be no "fall back" behavior internally. +The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +Functions that mutate the passed object can produce unexpected +behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` +for more details. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. +{examples}""" + +_agg_template_frame = """ +Aggregate using one or more operations over the specified axis. + +Parameters +---------- +func : function, str, list, dict or None + Function to use for aggregating the data. If a function, must either + work when passed a {klass} or when passed to {klass}.apply. + + Accepted combinations are: + + - function + - string function name + - list of functions and/or function names, e.g. ``[np.sum, 'mean']`` + - dict of axis labels -> functions, function names or list of such. + - None, in which case ``**kwargs`` are used with Named Aggregation. Here the + output has one column for each element in ``**kwargs``. The name of the + column is keyword, whereas the value determines the aggregation used to compute + the values in the column. + + Can also accept a Numba JIT function with + ``engine='numba'`` specified. Only passing a single function is supported + with this engine. + + If the ``'numba'`` engine is chosen, the function must be + a user defined function with ``values`` and ``index`` as the + first and second arguments respectively in the function signature. + Each group's index will be passed to the user defined function + and optionally available for use. + +*args + Positional arguments to pass to func. +engine : str, default None + * ``'cython'`` : Runs the function through C-extensions from cython. + * ``'numba'`` : Runs the function through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + +engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. 
The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to the function + +**kwargs + * If ``func`` is None, ``**kwargs`` are used to define the output names and + aggregations via Named Aggregation. See ``func`` entry. + * Otherwise, keyword arguments to be passed into func. + +Returns +------- +{klass} + +See Also +-------- +{klass}.groupby.apply : Apply function func group-wise + and combine the results together. +{klass}.groupby.transform : Transforms the Series on each group + based on the given function. +{klass}.aggregate : Aggregate using one or more + operations over the specified axis. + +Notes +----- +When using ``engine='numba'``, there will be no "fall back" behavior internally. +The group data and group index will be passed as numpy arrays to the JITed +user defined function, and no alternative execution attempts will be tried. + +Functions that mutate the passed object can produce unexpected +behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` +for more details. + +.. versionchanged:: 1.3.0 + + The resulting dtype will reflect the return value of the passed ``func``, + see the examples below. +{examples}""" + + +@final +class GroupByPlot(PandasObject): + """ + Class implementing the .plot attribute for groupby objects. + """ + + def __init__(self, groupby: GroupBy) -> None: + self._groupby = groupby + + def __call__(self, *args, **kwargs): + def f(self): + return self.plot(*args, **kwargs) + + f.__name__ = "plot" + return self._groupby._python_apply_general(f, self._groupby._selected_obj) + + def __getattr__(self, name: str): + def attr(*args, **kwargs): + def f(self): + return getattr(self.plot, name)(*args, **kwargs) + + return self._groupby._python_apply_general(f, self._groupby._selected_obj) + + return attr + + +_KeysArgType = Union[ + Hashable, + list[Hashable], + Callable[[Hashable], Hashable], + list[Callable[[Hashable], Hashable]], + Mapping[Hashable, Hashable], +] + + +class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin): + _hidden_attrs = PandasObject._hidden_attrs | { + "as_index", + "axis", + "dropna", + "exclusions", + "grouper", + "group_keys", + "keys", + "level", + "obj", + "observed", + "sort", + } + + axis: AxisInt + _grouper: ops.BaseGrouper + keys: _KeysArgType | None = None + level: IndexLabel | None = None + group_keys: bool + + @final + def __len__(self) -> int: + return len(self.groups) + + @final + def __repr__(self) -> str: + # TODO: Better repr for GroupBy object + return object.__repr__(self) + + @final + @property + def grouper(self) -> ops.BaseGrouper: + warnings.warn( + f"{type(self).__name__}.grouper is deprecated and will be removed in a " + "future version of pandas.", + category=FutureWarning, + stacklevel=find_stack_level(), + ) + return self._grouper + + @final + @property + def groups(self) -> dict[Hashable, np.ndarray]: + """ + Dict {group name -> group labels}. + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).groups + {'a': ['a', 'a'], 'b': ['b']} + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"]) + >>> df + a b c + 0 1 2 3 + 1 1 5 6 + 2 7 8 9 + >>> df.groupby(by=["a"]).groups + {1: [0, 1], 7: [2]} + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... 
['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').groups + {Timestamp('2023-01-01 00:00:00'): 2, Timestamp('2023-02-01 00:00:00'): 4} + """ + return self._grouper.groups + + @final + @property + def ngroups(self) -> int: + return self._grouper.ngroups + + @final + @property + def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: + """ + Dict {group name -> group indices}. + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).indices + {'a': array([0, 1]), 'b': array([2])} + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["owl", "toucan", "eagle"]) + >>> df + a b c + owl 1 2 3 + toucan 1 5 6 + eagle 7 8 9 + >>> df.groupby(by=["a"]).indices + {1: array([0, 1]), 7: array([2])} + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').indices + defaultdict(, {Timestamp('2023-01-01 00:00:00'): [0, 1], + Timestamp('2023-02-01 00:00:00'): [2, 3]}) + """ + return self._grouper.indices + + @final + def _get_indices(self, names): + """ + Safe get multiple indices, translate keys for + datelike to underlying repr. + """ + + def get_converter(s): + # possibly convert to the actual key types + # in the indices, could be a Timestamp or a np.datetime64 + if isinstance(s, datetime.datetime): + return lambda key: Timestamp(key) + elif isinstance(s, np.datetime64): + return lambda key: Timestamp(key).asm8 + else: + return lambda key: key + + if len(names) == 0: + return [] + + if len(self.indices) > 0: + index_sample = next(iter(self.indices)) + else: + index_sample = None # Dummy sample + + name_sample = names[0] + if isinstance(index_sample, tuple): + if not isinstance(name_sample, tuple): + msg = "must supply a tuple to get_group with multiple grouping keys" + raise ValueError(msg) + if not len(name_sample) == len(index_sample): + try: + # If the original grouper was a tuple + return [self.indices[name] for name in names] + except KeyError as err: + # turns out it wasn't a tuple + msg = ( + "must supply a same-length tuple to get_group " + "with multiple grouping keys" + ) + raise ValueError(msg) from err + + converters = [get_converter(s) for s in index_sample] + names = (tuple(f(n) for f, n in zip(converters, name)) for name in names) + + else: + converter = get_converter(index_sample) + names = (converter(name) for name in names) + + return [self.indices.get(name, []) for name in names] + + @final + def _get_index(self, name): + """ + Safe get index, translate keys for datelike to underlying repr. + """ + return self._get_indices([name])[0] + + @final + @cache_readonly + def _selected_obj(self): + # Note: _selected_obj is always just `self.obj` for SeriesGroupBy + if isinstance(self.obj, Series): + return self.obj + + if self._selection is not None: + if is_hashable(self._selection): + # i.e. a single key, so selecting it will return a Series. + # In this case, _obj_with_exclusions would wrap the key + # in a list and return a single-column DataFrame. 
+ return self.obj[self._selection] + + # Otherwise _selection is equivalent to _selection_list, so + # _selected_obj matches _obj_with_exclusions, so we can reuse + # that and avoid making a copy. + return self._obj_with_exclusions + + return self.obj + + @final + def _dir_additions(self) -> set[str]: + return self.obj._dir_additions() + + @Substitution( + klass="GroupBy", + examples=dedent( + """\ + >>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]}) + >>> df + A B + 0 a 1 + 1 b 2 + 2 a 3 + 3 b 4 + + To get the difference between each groups maximum and minimum value in one + pass, you can do + + >>> df.groupby('A').pipe(lambda x: x.max() - x.min()) + B + A + a 2 + b 2""" + ), + ) + @Appender(_pipe_template) + def pipe( + self, + func: Callable[..., T] | tuple[Callable[..., T], str], + *args, + **kwargs, + ) -> T: + return com.pipe(self, func, *args, **kwargs) + + @final + def get_group(self, name, obj=None) -> DataFrame | Series: + """ + Construct DataFrame from group with provided name. + + Parameters + ---------- + name : object + The name of the group to get as a DataFrame. + obj : DataFrame, default None + The DataFrame to take the DataFrame out of. If + it is None, the object groupby was called on will + be used. + + .. deprecated:: 2.1.0 + The obj is deprecated and will be removed in a future version. + Do ``df.iloc[gb.indices.get(name)]`` + instead of ``gb.get_group(name, obj=df)``. + + Returns + ------- + same type as obj + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).get_group("a") + a 1 + a 2 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["owl", "toucan", "eagle"]) + >>> df + a b c + owl 1 2 3 + toucan 1 5 6 + eagle 7 8 9 + >>> df.groupby(by=["a"]).get_group((1,)) + a b c + owl 1 2 3 + toucan 1 5 6 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').get_group('2023-01-01') + 2023-01-01 1 + 2023-01-15 2 + dtype: int64 + """ + keys = self.keys + level = self.level + # mypy doesn't recognize level/keys as being sized when passed to len + if (is_list_like(level) and len(level) == 1) or ( # type: ignore[arg-type] + is_list_like(keys) and len(keys) == 1 # type: ignore[arg-type] + ): + # GH#25971 + if isinstance(name, tuple) and len(name) == 1: + # Allow users to pass tuples of length 1 to silence warning + name = name[0] + elif not isinstance(name, tuple): + warnings.warn( + "When grouping with a length-1 list-like, " + "you will need to pass a length-1 tuple to get_group in a future " + "version of pandas. Pass `(name,)` instead of `name` to silence " + "this warning.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + inds = self._get_index(name) + if not len(inds): + raise KeyError(name) + + if obj is None: + indexer = inds if self.axis == 0 else (slice(None), inds) + return self._selected_obj.iloc[indexer] + else: + warnings.warn( + "obj is deprecated and will be removed in a future version. 
" + "Do ``df.iloc[gb.indices.get(name)]`` " + "instead of ``gb.get_group(name, obj=df)``.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return obj._take_with_is_copy(inds, axis=self.axis) + + @final + def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]: + """ + Groupby iterator. + + Returns + ------- + Generator yielding sequence of (name, subsetted object) + for each group + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> for x, y in ser.groupby(level=0): + ... print(f'{x}\\n{y}\\n') + a + a 1 + a 2 + dtype: int64 + b + b 3 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"]) + >>> df + a b c + 0 1 2 3 + 1 1 5 6 + 2 7 8 9 + >>> for x, y in df.groupby(by=["a"]): + ... print(f'{x}\\n{y}\\n') + (1,) + a b c + 0 1 2 3 + 1 1 5 6 + (7,) + a b c + 2 7 8 9 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> for x, y in ser.resample('MS'): + ... print(f'{x}\\n{y}\\n') + 2023-01-01 00:00:00 + 2023-01-01 1 + 2023-01-15 2 + dtype: int64 + 2023-02-01 00:00:00 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + """ + keys = self.keys + level = self.level + result = self._grouper.get_iterator(self._selected_obj, axis=self.axis) + # error: Argument 1 to "len" has incompatible type "Hashable"; expected "Sized" + if is_list_like(level) and len(level) == 1: # type: ignore[arg-type] + # GH 51583 + warnings.warn( + "Creating a Groupby object with a length-1 list-like " + "level parameter will yield indexes as tuples in a future version. " + "To keep indexes as scalars, create Groupby objects with " + "a scalar level parameter instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if isinstance(keys, list) and len(keys) == 1: + # GH#42795 - when keys is a list, return tuples even when length is 1 + result = (((key,), group) for key, group in result) + return result + + +# To track operations that expand dimensions, like ohlc +OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame) + + +class GroupBy(BaseGroupBy[NDFrameT]): + """ + Class for grouping and aggregating relational data. + + See aggregate, transform, and apply functions on this object. + + It's easiest to use obj.groupby(...) to use GroupBy, but you can also do: + + :: + + grouped = groupby(obj, ...) + + Parameters + ---------- + obj : pandas object + axis : int, default 0 + level : int, default None + Level of MultiIndex + groupings : list of Grouping objects + Most users should ignore this + exclusions : array-like, optional + List of columns to exclude + name : str + Most users should ignore this + + Returns + ------- + **Attributes** + groups : dict + {group name -> group labels} + len(grouped) : int + Number of groups + + Notes + ----- + After grouping, see aggregate, apply, and transform functions. Here are + some other brief notes about usage. When grouping by multiple groups, the + result index will be a MultiIndex (hierarchical) by default. + + Iteration produces (key, group) tuples, i.e. chunking the data by group. 
So + you can write code like: + + :: + + grouped = obj.groupby(keys, axis=axis) + for key, group in grouped: + # do something with the data + + Function calls on GroupBy, if not specially implemented, "dispatch" to the + grouped data. So if you group a DataFrame and wish to invoke the std() + method on each group, you can simply do: + + :: + + df.groupby(mapper).std() + + rather than + + :: + + df.groupby(mapper).aggregate(np.std) + + You can pass arguments to these "wrapped" functions, too. + + See the online documentation for full exposition on these topics and much + more + """ + + _grouper: ops.BaseGrouper + as_index: bool + + @final + def __init__( + self, + obj: NDFrameT, + keys: _KeysArgType | None = None, + axis: Axis = 0, + level: IndexLabel | None = None, + grouper: ops.BaseGrouper | None = None, + exclusions: frozenset[Hashable] | None = None, + selection: IndexLabel | None = None, + as_index: bool = True, + sort: bool = True, + group_keys: bool = True, + observed: bool | lib.NoDefault = lib.no_default, + dropna: bool = True, + ) -> None: + self._selection = selection + + assert isinstance(obj, NDFrame), type(obj) + + self.level = level + + if not as_index: + if axis != 0: + raise ValueError("as_index=False only valid for axis=0") + + self.as_index = as_index + self.keys = keys + self.sort = sort + self.group_keys = group_keys + self.dropna = dropna + + if grouper is None: + grouper, exclusions, obj = get_grouper( + obj, + keys, + axis=axis, + level=level, + sort=sort, + observed=False if observed is lib.no_default else observed, + dropna=self.dropna, + ) + + if observed is lib.no_default: + if any(ping._passed_categorical for ping in grouper.groupings): + warnings.warn( + "The default of observed=False is deprecated and will be changed " + "to True in a future version of pandas. Pass observed=False to " + "retain current behavior or observed=True to adopt the future " + "default and silence this warning.", + FutureWarning, + stacklevel=find_stack_level(), + ) + observed = False + self.observed = observed + + self.obj = obj + self.axis = obj._get_axis_number(axis) + self._grouper = grouper + self.exclusions = frozenset(exclusions) if exclusions else frozenset() + + def __getattr__(self, attr: str): + if attr in self._internal_names_set: + return object.__getattribute__(self, attr) + if attr in self.obj: + return self[attr] + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{attr}'" + ) + + @final + def _deprecate_axis(self, axis: int, name: str) -> None: + if axis == 1: + warnings.warn( + f"{type(self).__name__}.{name} with axis=1 is deprecated and " + "will be removed in a future version. Operate on the un-grouped " + "DataFrame instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + warnings.warn( + f"The 'axis' keyword in {type(self).__name__}.{name} is deprecated " + "and will be removed in a future version. 
" + "Call without passing 'axis' instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + @final + def _op_via_apply(self, name: str, *args, **kwargs): + """Compute the result of an operation by using GroupBy's apply.""" + f = getattr(type(self._obj_with_exclusions), name) + sig = inspect.signature(f) + + if "axis" in kwargs and kwargs["axis"] is not lib.no_default: + axis = self.obj._get_axis_number(kwargs["axis"]) + self._deprecate_axis(axis, name) + elif "axis" in kwargs: + # exclude skew here because that was already defaulting to lib.no_default + # before this deprecation was instituted + if name == "skew": + pass + elif name == "fillna": + # maintain the behavior from before the deprecation + kwargs["axis"] = None + else: + kwargs["axis"] = 0 + + # a little trickery for aggregation functions that need an axis + # argument + if "axis" in sig.parameters: + if kwargs.get("axis", None) is None or kwargs.get("axis") is lib.no_default: + kwargs["axis"] = self.axis + + def curried(x): + return f(x, *args, **kwargs) + + # preserve the name so we can detect it when calling plot methods, + # to avoid duplicates + curried.__name__ = name + + # special case otherwise extra plots are created when catching the + # exception below + if name in base.plotting_methods: + return self._python_apply_general(curried, self._selected_obj) + + is_transform = name in base.transformation_kernels + result = self._python_apply_general( + curried, + self._obj_with_exclusions, + is_transform=is_transform, + not_indexed_same=not is_transform, + ) + + if self._grouper.has_dropped_na and is_transform: + # result will have dropped rows due to nans, fill with null + # and ensure index is ordered same as the input + result = self._set_result_index_ordered(result) + return result + + # ----------------------------------------------------------------- + # Dispatch/Wrapping + + @final + def _concat_objects( + self, + values, + not_indexed_same: bool = False, + is_transform: bool = False, + ): + from pandas.core.reshape.concat import concat + + if self.group_keys and not is_transform: + if self.as_index: + # possible MI return case + group_keys = self._grouper.result_index + group_levels = self._grouper.levels + group_names = self._grouper.names + + result = concat( + values, + axis=self.axis, + keys=group_keys, + levels=group_levels, + names=group_names, + sort=False, + ) + else: + # GH5610, returns a MI, with the first level being a + # range index + keys = list(range(len(values))) + result = concat(values, axis=self.axis, keys=keys) + + elif not not_indexed_same: + result = concat(values, axis=self.axis) + + ax = self._selected_obj._get_axis(self.axis) + if self.dropna: + labels = self._grouper.group_info[0] + mask = labels != -1 + ax = ax[mask] + + # this is a very unfortunate situation + # we can't use reindex to restore the original order + # when the ax has duplicates + # so we resort to this + # GH 14776, 30667 + # TODO: can we reuse e.g. _reindex_non_unique? + if ax.has_duplicates and not result.axes[self.axis].equals(ax): + # e.g. 
test_category_order_transformer + target = algorithms.unique1d(ax._values) + indexer, _ = result.index.get_indexer_non_unique(target) + result = result.take(indexer, axis=self.axis) + else: + result = result.reindex(ax, axis=self.axis, copy=False) + + else: + result = concat(values, axis=self.axis) + + if self.obj.ndim == 1: + name = self.obj.name + elif is_hashable(self._selection): + name = self._selection + else: + name = None + + if isinstance(result, Series) and name is not None: + result.name = name + + return result + + @final + def _set_result_index_ordered( + self, result: OutputFrameOrSeries + ) -> OutputFrameOrSeries: + # set the result index on the passed values object and + # return the new object, xref 8046 + + obj_axis = self.obj._get_axis(self.axis) + + if self._grouper.is_monotonic and not self._grouper.has_dropped_na: + # shortcut if we have an already ordered grouper + result = result.set_axis(obj_axis, axis=self.axis, copy=False) + return result + + # row order is scrambled => sort the rows by position in original index + original_positions = Index(self._grouper.result_ilocs()) + result = result.set_axis(original_positions, axis=self.axis, copy=False) + result = result.sort_index(axis=self.axis) + if self._grouper.has_dropped_na: + # Add back in any missing rows due to dropna - index here is integral + # with values referring to the row of the input so can use RangeIndex + result = result.reindex(RangeIndex(len(obj_axis)), axis=self.axis) + result = result.set_axis(obj_axis, axis=self.axis, copy=False) + + return result + + @final + def _insert_inaxis_grouper(self, result: Series | DataFrame) -> DataFrame: + if isinstance(result, Series): + result = result.to_frame() + + # zip in reverse so we can always insert at loc 0 + columns = result.columns + for name, lev, in_axis in zip( + reversed(self._grouper.names), + reversed(self._grouper.get_group_levels()), + reversed([grp.in_axis for grp in self._grouper.groupings]), + ): + # GH #28549 + # When using .apply(-), name will be in columns already + if name not in columns: + if in_axis: + result.insert(0, name, lev) + else: + msg = ( + "A grouping was used that is not in the columns of the " + "DataFrame and so was excluded from the result. This grouping " + "will be included in a future version of pandas. Add the " + "grouping as a column of the DataFrame to silence this warning." + ) + warnings.warn( + message=msg, + category=FutureWarning, + stacklevel=find_stack_level(), + ) + + return result + + @final + def _maybe_transpose_result(self, result: NDFrameT) -> NDFrameT: + if self.axis == 1: + # Only relevant for DataFrameGroupBy, no-op for SeriesGroupBy + result = result.T + if result.index.equals(self.obj.index): + # Retain e.g. DatetimeIndex/TimedeltaIndex freq + # e.g. test_groupby_crash_on_nunique + result.index = self.obj.index.copy() + return result + + @final + def _wrap_aggregated_output( + self, + result: Series | DataFrame, + qs: npt.NDArray[np.float64] | None = None, + ): + """ + Wraps the output of GroupBy aggregations into the expected result. 
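For reference, the ``as_index=False`` path that ``_insert_inaxis_grouper`` serves puts the group keys back as regular columns of the result (a sketch with assumed data):

>>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 3]})
>>> df.groupby("key", as_index=False).sum()
  key  val
0   a    3
1   b    3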
+ + Parameters + ---------- + result : Series, DataFrame + + Returns + ------- + Series or DataFrame + """ + # ATM we do not get here for SeriesGroupBy; when we do, we will + # need to require that result.name already match self.obj.name + + if not self.as_index: + # `not self.as_index` is only relevant for DataFrameGroupBy, + # enforced in __init__ + result = self._insert_inaxis_grouper(result) + result = result._consolidate() + index = Index(range(self._grouper.ngroups)) + + else: + index = self._grouper.result_index + + if qs is not None: + # We get here with len(qs) != 1 and not self.as_index + # in test_pass_args_kwargs + index = _insert_quantile_level(index, qs) + + result.index = index + + # error: Argument 1 to "_maybe_transpose_result" of "GroupBy" has + # incompatible type "Union[Series, DataFrame]"; expected "NDFrameT" + res = self._maybe_transpose_result(result) # type: ignore[arg-type] + return self._reindex_output(res, qs=qs) + + def _wrap_applied_output( + self, + data, + values: list, + not_indexed_same: bool = False, + is_transform: bool = False, + ): + raise AbstractMethodError(self) + + # ----------------------------------------------------------------- + # numba + + @final + def _numba_prep(self, data: DataFrame): + ids, _, ngroups = self._grouper.group_info + sorted_index = self._grouper._sort_idx + sorted_ids = self._grouper._sorted_ids + + sorted_data = data.take(sorted_index, axis=self.axis).to_numpy() + # GH 46867 + index_data = data.index + if isinstance(index_data, MultiIndex): + if len(self._grouper.groupings) > 1: + raise NotImplementedError( + "Grouping with more than 1 grouping labels and " + "a MultiIndex is not supported with engine='numba'" + ) + group_key = self._grouper.groupings[0].name + index_data = index_data.get_level_values(group_key) + sorted_index_data = index_data.take(sorted_index).to_numpy() + + starts, ends = lib.generate_slices(sorted_ids, ngroups) + return ( + starts, + ends, + sorted_index_data, + sorted_data, + ) + + def _numba_agg_general( + self, + func: Callable, + dtype_mapping: dict[np.dtype, Any], + engine_kwargs: dict[str, bool] | None, + **aggregator_kwargs, + ): + """ + Perform groupby with a standard numerical aggregation function (e.g. mean) + with Numba. + """ + if not self.as_index: + raise NotImplementedError( + "as_index=False is not supported. Use .reset_index() instead." + ) + if self.axis == 1: + raise NotImplementedError("axis=1 is not supported.") + + data = self._obj_with_exclusions + df = data if data.ndim == 2 else data.to_frame() + + aggregator = executor.generate_shared_aggregator( + func, + dtype_mapping, + True, # is_grouped_kernel + **get_jit_arguments(engine_kwargs), + ) + # Pass group ids to kernel directly if it can handle it + # (This is faster since it doesn't require a sort) + ids, _, _ = self._grouper.group_info + ngroups = self._grouper.ngroups + + res_mgr = df._mgr.apply( + aggregator, labels=ids, ngroups=ngroups, **aggregator_kwargs + ) + res_mgr.axes[1] = self._grouper.result_index + result = df._constructor_from_mgr(res_mgr, axes=res_mgr.axes) + + if data.ndim == 1: + result = result.squeeze("columns") + result.name = data.name + else: + result.columns = data.columns + return result + + @final + def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby transform routine with the numba engine. 
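The kernels prepared by ``_numba_agg_general`` back the public ``engine`` keyword documented on the reduction methods; a sketch (assumed data, requires numba):

>>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 2.0, 3.0]})
>>> df.groupby("key")["val"].mean(engine="numba")  # doctest: +SKIP
key
a    1.5
b    3.0
Name: val, dtype: float64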
+ + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. + """ + data = self._obj_with_exclusions + df = data if data.ndim == 2 else data.to_frame() + + starts, ends, sorted_index, sorted_data = self._numba_prep(df) + numba_.validate_udf(func) + numba_transform_func = numba_.generate_numba_transform_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + result = numba_transform_func( + sorted_data, + sorted_index, + starts, + ends, + len(df.columns), + *args, + ) + # result values needs to be resorted to their original positions since we + # evaluated the data sorted by group + result = result.take(np.argsort(sorted_index), axis=0) + index = data.index + if data.ndim == 1: + result_kwargs = {"name": data.name} + result = result.ravel() + else: + result_kwargs = {"columns": data.columns} + return data._constructor(result, index=index, **result_kwargs) + + @final + def _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs): + """ + Perform groupby aggregation routine with the numba engine. + + This routine mimics the data splitting routine of the DataSplitter class + to generate the indices of each group in the sorted data and then passes the + data and indices into a Numba jitted function. + """ + data = self._obj_with_exclusions + df = data if data.ndim == 2 else data.to_frame() + + starts, ends, sorted_index, sorted_data = self._numba_prep(df) + numba_.validate_udf(func) + numba_agg_func = numba_.generate_numba_agg_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + result = numba_agg_func( + sorted_data, + sorted_index, + starts, + ends, + len(df.columns), + *args, + ) + index = self._grouper.result_index + if data.ndim == 1: + result_kwargs = {"name": data.name} + result = result.ravel() + else: + result_kwargs = {"columns": data.columns} + res = data._constructor(result, index=index, **result_kwargs) + if not self.as_index: + res = self._insert_inaxis_grouper(res) + res.index = default_index(len(res)) + return res + + # ----------------------------------------------------------------- + # apply/agg/transform + + @Appender( + _apply_docs["template"].format( + input="dataframe", examples=_apply_docs["dataframe_examples"] + ) + ) + def apply(self, func, *args, include_groups: bool = True, **kwargs) -> NDFrameT: + orig_func = func + func = com.is_builtin_func(func) + if orig_func != func: + alias = com._builtin_table_alias[orig_func] + warn_alias_replacement(self, orig_func, alias) + + if isinstance(func, str): + if hasattr(self, func): + res = getattr(self, func) + if callable(res): + return res(*args, **kwargs) + elif args or kwargs: + raise ValueError(f"Cannot pass arguments to property {func}") + return res + + else: + raise TypeError(f"apply func should be callable, not '{func}'") + + elif args or kwargs: + if callable(func): + + @wraps(func) + def f(g): + return func(g, *args, **kwargs) + + else: + raise ValueError( + "func must be a callable if args or kwargs are supplied" + ) + else: + f = func + + if not include_groups: + return self._python_apply_general(f, self._obj_with_exclusions) + + # ignore SettingWithCopy here in case the user mutates + with option_context("mode.chained_assignment", None): + try: + result = self._python_apply_general(f, self._selected_obj) + if ( + not isinstance(self.obj, Series) + and self._selection is None + and self._selected_obj.shape != self._obj_with_exclusions.shape + 
): + warnings.warn( + message=_apply_groupings_depr.format( + type(self).__name__, "apply" + ), + category=DeprecationWarning, + stacklevel=find_stack_level(), + ) + except TypeError: + # gh-20949 + # try again, with .apply acting as a filtering + # operation, by excluding the grouping column + # This would normally not be triggered + # except if the udf is trying an operation that + # fails on *some* columns, e.g. a numeric operation + # on a string grouper column + + return self._python_apply_general(f, self._obj_with_exclusions) + + return result + + @final + def _python_apply_general( + self, + f: Callable, + data: DataFrame | Series, + not_indexed_same: bool | None = None, + is_transform: bool = False, + is_agg: bool = False, + ) -> NDFrameT: + """ + Apply function f in python space + + Parameters + ---------- + f : callable + Function to apply + data : Series or DataFrame + Data to apply f to + not_indexed_same: bool, optional + When specified, overrides the value of not_indexed_same. Apply behaves + differently when the result index is equal to the input index, but + this can be coincidental leading to value-dependent behavior. + is_transform : bool, default False + Indicator for whether the function is actually a transform + and should not have group keys prepended. + is_agg : bool, default False + Indicator for whether the function is an aggregation. When the + result is empty, we don't want to warn for this case. + See _GroupBy._python_agg_general. + + Returns + ------- + Series or DataFrame + data after applying f + """ + values, mutated = self._grouper.apply_groupwise(f, data, self.axis) + if not_indexed_same is None: + not_indexed_same = mutated + + return self._wrap_applied_output( + data, + values, + not_indexed_same, + is_transform, + ) + + @final + def _agg_general( + self, + numeric_only: bool = False, + min_count: int = -1, + *, + alias: str, + npfunc: Callable | None = None, + **kwargs, + ): + result = self._cython_agg_general( + how=alias, + alt=npfunc, + numeric_only=numeric_only, + min_count=min_count, + **kwargs, + ) + return result.__finalize__(self.obj, method="groupby") + + def _agg_py_fallback( + self, how: str, values: ArrayLike, ndim: int, alt: Callable + ) -> ArrayLike: + """ + Fallback to pure-python aggregation if _cython_operation raises + NotImplementedError. + """ + # We get here with a) EADtypes and b) object dtype + assert alt is not None + + if values.ndim == 1: + # For DataFrameGroupBy we only get here with ExtensionArray + ser = Series(values, copy=False) + else: + # We only get here with values.dtype == object + df = DataFrame(values.T, dtype=values.dtype) + # bc we split object blocks in grouped_reduce, we have only 1 col + # otherwise we'd have to worry about block-splitting GH#39329 + assert df.shape[1] == 1 + # Avoid call to self.values that can occur in DataFrame + # reductions; see GH#28949 + ser = df.iloc[:, 0] + + # We do not get here with UDFs, so we know that our dtype + # should always be preserved by the implemented aggregations + # TODO: Is this exactly right; see WrappedCythonOp get_result_dtype? 
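# Sketch of one kind of input that can take this fallback (object dtype); whether a
# given op reaches it depends on the cython kernel's dtype support, which is assumed
# here. Summing string values concatenates them per group:
#   >>> df = pd.DataFrame({"key": ["a", "a", "b"], "val": ["x", "y", "z"]})
#   >>> df.groupby("key")["val"].sum()
#   key
#   a    xy
#   b     z
#   Name: val, dtype: object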
+ try: + res_values = self._grouper.agg_series(ser, alt, preserve_dtype=True) + except Exception as err: + msg = f"agg function failed [how->{how},dtype->{ser.dtype}]" + # preserve the kind of exception that raised + raise type(err)(msg) from err + + if ser.dtype == object: + res_values = res_values.astype(object, copy=False) + + # If we are DataFrameGroupBy and went through a SeriesGroupByPath + # then we need to reshape + # GH#32223 includes case with IntegerArray values, ndarray res_values + # test_groupby_duplicate_columns with object dtype values + return ensure_block_shape(res_values, ndim=ndim) + + @final + def _cython_agg_general( + self, + how: str, + alt: Callable | None = None, + numeric_only: bool = False, + min_count: int = -1, + **kwargs, + ): + # Note: we never get here with how="ohlc" for DataFrameGroupBy; + # that goes through SeriesGroupBy + + data = self._get_data_to_aggregate(numeric_only=numeric_only, name=how) + + def array_func(values: ArrayLike) -> ArrayLike: + try: + result = self._grouper._cython_operation( + "aggregate", + values, + how, + axis=data.ndim - 1, + min_count=min_count, + **kwargs, + ) + except NotImplementedError: + # generally if we have numeric_only=False + # and non-applicable functions + # try to python agg + # TODO: shouldn't min_count matter? + # TODO: avoid special casing SparseArray here + if how in ["any", "all"] and isinstance(values, SparseArray): + pass + elif alt is None or how in ["any", "all", "std", "sem"]: + raise # TODO: re-raise as TypeError? should not be reached + else: + return result + + assert alt is not None + result = self._agg_py_fallback(how, values, ndim=data.ndim, alt=alt) + return result + + new_mgr = data.grouped_reduce(array_func) + res = self._wrap_agged_manager(new_mgr) + if how in ["idxmin", "idxmax"]: + res = self._wrap_idxmax_idxmin(res) + out = self._wrap_aggregated_output(res) + if self.axis == 1: + out = out.infer_objects(copy=False) + return out + + def _cython_transform( + self, how: str, numeric_only: bool = False, axis: AxisInt = 0, **kwargs + ): + raise AbstractMethodError(self) + + @final + def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): + # optimized transforms + orig_func = func + func = com.get_cython_func(func) or func + if orig_func != func: + warn_alias_replacement(self, orig_func, func) + + if not isinstance(func, str): + return self._transform_general(func, engine, engine_kwargs, *args, **kwargs) + + elif func not in base.transform_kernel_allowlist: + msg = f"'{func}' is not a valid function name for transform(name)" + raise ValueError(msg) + elif func in base.cythonized_kernels or func in base.transformation_kernels: + # cythonized transform or canned "agg+broadcast" + if engine is not None: + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + return getattr(self, func)(*args, **kwargs) + + else: + # i.e. func in base.reduction_kernels + + # GH#30918 Use _transform_fast only when we know func is an aggregation + # If func is a reduction, we need to broadcast the + # result to the whole group. Compute func result + # and deal with possible broadcasting below. 
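As a quick illustration of the broadcasting described in the comments above (illustrative data): a reduction name passed to transform is computed once per group and then expanded back to the input's shape, whereas the same name used as an aggregation yields one row per group.

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 4]})
gb = df.groupby("key")["val"]

print(gb.sum().tolist())             # [3, 4]    -- one value per group
print(gb.transform("sum").tolist())  # [3, 3, 4] -- broadcast to input length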
+ with com.temp_setattr(self, "as_index", True): + # GH#49834 - result needs groups in the index for + # _wrap_transform_fast_result + if func in ["idxmin", "idxmax"]: + func = cast(Literal["idxmin", "idxmax"], func) + result = self._idxmax_idxmin(func, True, *args, **kwargs) + else: + if engine is not None: + kwargs["engine"] = engine + kwargs["engine_kwargs"] = engine_kwargs + result = getattr(self, func)(*args, **kwargs) + + return self._wrap_transform_fast_result(result) + + @final + def _wrap_transform_fast_result(self, result: NDFrameT) -> NDFrameT: + """ + Fast transform path for aggregations. + """ + obj = self._obj_with_exclusions + + # for each col, reshape to size of original frame by take operation + ids, _, _ = self._grouper.group_info + result = result.reindex(self._grouper.result_index, axis=self.axis, copy=False) + + if self.obj.ndim == 1: + # i.e. SeriesGroupBy + out = algorithms.take_nd(result._values, ids) + output = obj._constructor(out, index=obj.index, name=obj.name) + else: + # `.size()` gives Series output on DataFrame input, need axis 0 + axis = 0 if result.ndim == 1 else self.axis + # GH#46209 + # Don't convert indices: negative indices need to give rise + # to null values in the result + new_ax = result.axes[axis].take(ids) + output = result._reindex_with_indexers( + {axis: (new_ax, ids)}, allow_dups=True, copy=False + ) + output = output.set_axis(obj._get_axis(self.axis), axis=axis) + return output + + # ----------------------------------------------------------------- + # Utilities + + @final + def _apply_filter(self, indices, dropna): + if len(indices) == 0: + indices = np.array([], dtype="int64") + else: + indices = np.sort(np.concatenate(indices)) + if dropna: + filtered = self._selected_obj.take(indices, axis=self.axis) + else: + mask = np.empty(len(self._selected_obj.index), dtype=bool) + mask.fill(False) + mask[indices.astype(int)] = True + # mask fails to broadcast when passed to where; broadcast manually. + mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T + filtered = self._selected_obj.where(mask) # Fill with NaNs. + return filtered + + @final + def _cumcount_array(self, ascending: bool = True) -> np.ndarray: + """ + Parameters + ---------- + ascending : bool, default True + If False, number in reverse, from length of group - 1 to 0. 
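The _apply_filter helper above is used by the public filter method; a small sketch of its two dropna modes, with illustrative data:

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1, 2, 4]})

kept = df.groupby("key").filter(lambda g: g["val"].sum() > 3)                  # drop failing groups
masked = df.groupby("key").filter(lambda g: g["val"].sum() > 3, dropna=False)  # keep shape, fill NaN
print(kept["val"].tolist())    # [4]
print(masked["val"].tolist())  # [nan, nan, 4.0]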
+ + Notes + ----- + this is currently implementing sort=False + (though the default is sort=True) for groupby in general + """ + ids, _, ngroups = self._grouper.group_info + sorter = get_group_index_sorter(ids, ngroups) + ids, count = ids[sorter], len(ids) + + if count == 0: + return np.empty(0, dtype=np.int64) + + run = np.r_[True, ids[:-1] != ids[1:]] + rep = np.diff(np.r_[np.nonzero(run)[0], count]) + out = (~run).cumsum() + + if ascending: + out -= np.repeat(out[run], rep) + else: + out = np.repeat(out[np.r_[run[1:], True]], rep) - out + + if self._grouper.has_dropped_na: + out = np.where(ids == -1, np.nan, out.astype(np.float64, copy=False)) + else: + out = out.astype(np.int64, copy=False) + + rev = np.empty(count, dtype=np.intp) + rev[sorter] = np.arange(count, dtype=np.intp) + return out[rev] + + # ----------------------------------------------------------------- + + @final + @property + def _obj_1d_constructor(self) -> Callable: + # GH28330 preserve subclassed Series/DataFrames + if isinstance(self.obj, DataFrame): + return self.obj._constructor_sliced + assert isinstance(self.obj, Series) + return self.obj._constructor + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def any(self, skipna: bool = True) -> NDFrameT: + """ + Return True if any value in the group is truthful, else False. + + Parameters + ---------- + skipna : bool, default True + Flag to ignore nan values during truth testing. + + Returns + ------- + Series or DataFrame + DataFrame or Series of boolean values, where a value is True if any element + is True within its respective group, False otherwise. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 0], index=lst) + >>> ser + a 1 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).any() + a True + b False + dtype: bool + + For DataFrameGroupBy: + + >>> data = [[1, 0, 3], [1, 0, 6], [7, 1, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["ostrich", "penguin", "parrot"]) + >>> df + a b c + ostrich 1 0 3 + penguin 1 0 6 + parrot 7 1 9 + >>> df.groupby(by=["a"]).any() + b c + a + 1 False True + 7 True True + """ + return self._cython_agg_general( + "any", + alt=lambda x: Series(x, copy=False).any(skipna=skipna), + skipna=skipna, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def all(self, skipna: bool = True) -> NDFrameT: + """ + Return True if all values in the group are truthful, else False. + + Parameters + ---------- + skipna : bool, default True + Flag to ignore nan values during truth testing. + + Returns + ------- + Series or DataFrame + DataFrame or Series of boolean values, where a value is True if all elements + are True within its respective group, False otherwise. + %(see_also)s + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 0], index=lst) + >>> ser + a 1 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).all() + a True + b False + dtype: bool + + For DataFrameGroupBy: + + >>> data = [[1, 0, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["ostrich", "penguin", "parrot"]) + >>> df + a b c + ostrich 1 0 3 + penguin 1 5 6 + parrot 7 8 9 + >>> df.groupby(by=["a"]).all() + b c + a + 1 False True + 7 True True + """ + return self._cython_agg_general( + "all", + alt=lambda x: Series(x, copy=False).all(skipna=skipna), + skipna=skipna, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def count(self) -> NDFrameT: + """ + Compute count of group, excluding missing values. + + Returns + ------- + Series or DataFrame + Count of values within each group. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, np.nan], index=lst) + >>> ser + a 1.0 + a 2.0 + b NaN + dtype: float64 + >>> ser.groupby(level=0).count() + a 2 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, np.nan, 3], [1, np.nan, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["cow", "horse", "bull"]) + >>> df + a b c + cow 1 NaN 3 + horse 1 NaN 6 + bull 7 8.0 9 + >>> df.groupby("a").count() + b c + a + 1 0 2 + 7 1 1 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 4], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01', '2023-02-15'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + 2023-02-15 4 + dtype: int64 + >>> ser.resample('MS').count() + 2023-01-01 2 + 2023-02-01 2 + Freq: MS, dtype: int64 + """ + data = self._get_data_to_aggregate() + ids, _, ngroups = self._grouper.group_info + mask = ids != -1 + + is_series = data.ndim == 1 + + def hfunc(bvalues: ArrayLike) -> ArrayLike: + # TODO(EA2D): reshape would not be necessary with 2D EAs + if bvalues.ndim == 1: + # EA + masked = mask & ~isna(bvalues).reshape(1, -1) + else: + masked = mask & ~isna(bvalues) + + counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups) + if isinstance(bvalues, BaseMaskedArray): + return IntegerArray( + counted[0], mask=np.zeros(counted.shape[1], dtype=np.bool_) + ) + elif isinstance(bvalues, ArrowExtensionArray) and not isinstance( + bvalues.dtype, StringDtype + ): + dtype = pandas_dtype("int64[pyarrow]") + return type(bvalues)._from_sequence(counted[0], dtype=dtype) + if is_series: + assert counted.ndim == 2 + assert counted.shape[0] == 1 + return counted[0] + return counted + + new_mgr = data.grouped_reduce(hfunc) + new_obj = self._wrap_agged_manager(new_mgr) + + # If we are grouping on categoricals we want unobserved categories to + # return zero, rather than the default of NaN which the reindexing in + # _wrap_aggregated_output() returns. GH 35028 + # e.g. test_dataframe_groupby_on_2_categoricals_when_observed_is_false + with com.temp_setattr(self, "observed", True): + result = self._wrap_aggregated_output(new_obj) + + return self._reindex_output(result, fill_value=0) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def mean( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + """ + Compute mean of groups, excluding missing values. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None`` and defaults to ``False``. + + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. 
+ * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. versionadded:: 1.4.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: 1.4.0 + + Returns + ------- + pandas.Series or pandas.DataFrame + %(see_also)s + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], + ... 'B': [np.nan, 2, 3, 4, 5], + ... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C']) + + Groupby one column and return the mean of the remaining columns in + each group. + + >>> df.groupby('A').mean() + B C + A + 1 3.0 1.333333 + 2 4.0 1.500000 + + Groupby two columns and return the mean of the remaining column. + + >>> df.groupby(['A', 'B']).mean() + C + A B + 1 2.0 2.0 + 4.0 1.0 + 2 3.0 1.0 + 5.0 2.0 + + Groupby one column and return the mean of only particular column in + the group. + + >>> df.groupby('A')['B'].mean() + A + 1 3.0 + 2 4.0 + Name: B, dtype: float64 + """ + + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_mean + + return self._numba_agg_general( + grouped_mean, + executor.float_dtype_mapping, + engine_kwargs, + min_periods=0, + ) + else: + result = self._cython_agg_general( + "mean", + alt=lambda x: Series(x, copy=False).mean(numeric_only=numeric_only), + numeric_only=numeric_only, + ) + return result.__finalize__(self.obj, method="groupby") + + @final + def median(self, numeric_only: bool = False) -> NDFrameT: + """ + Compute median of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + + .. versionchanged:: 2.0.0 + + numeric_only no longer accepts ``None`` and defaults to False. + + Returns + ------- + Series or DataFrame + Median of values within each group. + + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).median() + a 7.0 + b 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).median() + a b + dog 3.0 4.0 + mouse 7.0 3.0 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3, 3, 4, 5], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... 
'2023-02-15'])) + >>> ser.resample('MS').median() + 2023-01-01 2.0 + 2023-02-01 4.0 + Freq: MS, dtype: float64 + """ + result = self._cython_agg_general( + "median", + alt=lambda x: Series(x, copy=False).median(numeric_only=numeric_only), + numeric_only=numeric_only, + ) + return result.__finalize__(self.obj, method="groupby") + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def std( + self, + ddof: int = 1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + numeric_only: bool = False, + ): + """ + Compute standard deviation of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. versionadded:: 1.4.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: 1.4.0 + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Standard deviation of values within each group. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).std() + a 3.21455 + b 0.57735 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).std() + a b + dog 2.000000 3.511885 + mouse 2.217356 1.500000 + """ + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_var + + return np.sqrt( + self._numba_agg_general( + grouped_var, + executor.float_dtype_mapping, + engine_kwargs, + min_periods=0, + ddof=ddof, + ) + ) + else: + return self._cython_agg_general( + "std", + alt=lambda x: Series(x, copy=False).std(ddof=ddof), + numeric_only=numeric_only, + ddof=ddof, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def var( + self, + ddof: int = 1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + numeric_only: bool = False, + ): + """ + Compute variance of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. 
+ * ``None`` : Defaults to ``'cython'`` or globally setting + ``compute.use_numba`` + + .. versionadded:: 1.4.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: 1.4.0 + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Variance of values within each group. + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).var() + a 10.333333 + b 0.333333 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).var() + a b + dog 4.000000 12.333333 + mouse 4.916667 2.250000 + """ + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_var + + return self._numba_agg_general( + grouped_var, + executor.float_dtype_mapping, + engine_kwargs, + min_periods=0, + ddof=ddof, + ) + else: + return self._cython_agg_general( + "var", + alt=lambda x: Series(x, copy=False).var(ddof=ddof), + numeric_only=numeric_only, + ddof=ddof, + ) + + @final + def _value_counts( + self, + subset: Sequence[Hashable] | None = None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + dropna: bool = True, + ) -> DataFrame | Series: + """ + Shared implementation of value_counts for SeriesGroupBy and DataFrameGroupBy. + + SeriesGroupBy additionally supports a bins argument. See the docstring of + DataFrameGroupBy.value_counts for a description of arguments. + """ + if self.axis == 1: + raise NotImplementedError( + "DataFrameGroupBy.value_counts only handles axis=0" + ) + name = "proportion" if normalize else "count" + + df = self.obj + obj = self._obj_with_exclusions + + in_axis_names = { + grouping.name for grouping in self._grouper.groupings if grouping.in_axis + } + if isinstance(obj, Series): + _name = obj.name + keys = [] if _name in in_axis_names else [obj] + else: + unique_cols = set(obj.columns) + if subset is not None: + subsetted = set(subset) + clashing = subsetted & set(in_axis_names) + if clashing: + raise ValueError( + f"Keys {clashing} in subset cannot be in " + "the groupby column keys." + ) + doesnt_exist = subsetted - unique_cols + if doesnt_exist: + raise ValueError( + f"Keys {doesnt_exist} in subset do not " + f"exist in the DataFrame." 
+ ) + else: + subsetted = unique_cols + + keys = [ + # Can't use .values because the column label needs to be preserved + obj.iloc[:, idx] + for idx, _name in enumerate(obj.columns) + if _name not in in_axis_names and _name in subsetted + ] + + groupings = list(self._grouper.groupings) + for key in keys: + grouper, _, _ = get_grouper( + df, + key=key, + axis=self.axis, + sort=self.sort, + observed=False, + dropna=dropna, + ) + groupings += list(grouper.groupings) + + # Take the size of the overall columns + gb = df.groupby( + groupings, + sort=self.sort, + observed=self.observed, + dropna=self.dropna, + ) + result_series = cast(Series, gb.size()) + result_series.name = name + + # GH-46357 Include non-observed categories + # of non-grouping columns regardless of `observed` + if any( + isinstance(grouping.grouping_vector, (Categorical, CategoricalIndex)) + and not grouping._observed + for grouping in groupings + ): + levels_list = [ping._result_index for ping in groupings] + multi_index = MultiIndex.from_product( + levels_list, names=[ping.name for ping in groupings] + ) + result_series = result_series.reindex(multi_index, fill_value=0) + + if sort: + # Sort by the values + result_series = result_series.sort_values( + ascending=ascending, kind="stable" + ) + if self.sort: + # Sort by the groupings + names = result_series.index.names + # GH#55951 - Temporarily replace names in case they are integers + result_series.index.names = range(len(names)) + index_level = list(range(len(self._grouper.groupings))) + result_series = result_series.sort_index( + level=index_level, sort_remaining=False + ) + result_series.index.names = names + + if normalize: + # Normalize the results by dividing by the original group sizes. + # We are guaranteed to have the first N levels be the + # user-requested grouping. + levels = list( + range(len(self._grouper.groupings), result_series.index.nlevels) + ) + indexed_group_size = result_series.groupby( + result_series.index.droplevel(levels), + sort=self.sort, + dropna=self.dropna, + # GH#43999 - deprecation of observed=False + observed=False, + ).transform("sum") + result_series /= indexed_group_size + + # Handle groups of non-observed categories + result_series = result_series.fillna(0.0) + + result: Series | DataFrame + if self.as_index: + result = result_series + else: + # Convert to frame + index = result_series.index + columns = com.fill_missing_names(index.names) + if name in columns: + raise ValueError(f"Column label '{name}' is duplicate of result column") + result_series.name = name + result_series.index = index.set_names(range(len(columns))) + result_frame = result_series.reset_index() + orig_dtype = self._grouper.groupings[0].obj.columns.dtype # type: ignore[union-attr] + cols = Index(columns, dtype=orig_dtype).insert(len(columns), name) + result_frame.columns = cols + result = result_frame + return result.__finalize__(self.obj, method="value_counts") + + @final + def sem(self, ddof: int = 1, numeric_only: bool = False) -> NDFrameT: + """ + Compute standard error of the mean of groups, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex. + + Parameters + ---------- + ddof : int, default 1 + Degrees of freedom. + + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Standard error of the mean of values within each group. 
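A short sketch of the shared _value_counts behaviour above, using an illustrative frame: the result is a Series named "count" (or "proportion" when normalize=True) indexed by the group keys plus the counted columns.

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "a", "b"], "flag": [True, True, False, True]})

counts = df.groupby("key").value_counts()
props = df.groupby("key").value_counts(normalize=True)
print(counts.loc[("a", True)])  # 2
print(props.loc[("a", True)])   # 2/3 of the rows in group "a"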
+ + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([5, 10, 8, 14], index=lst) + >>> ser + a 5 + a 10 + b 8 + b 14 + dtype: int64 + >>> ser.groupby(level=0).sem() + a 2.5 + b 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = [[1, 12, 11], [1, 15, 2], [2, 5, 8], [2, 6, 12]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tuna", "salmon", "catfish", "goldfish"]) + >>> df + a b c + tuna 1 12 11 + salmon 1 15 2 + catfish 2 5 8 + goldfish 2 6 12 + >>> df.groupby("a").sem() + b c + a + 1 1.5 4.5 + 2 0.5 2.0 + + For Resampler: + + >>> ser = pd.Series([1, 3, 2, 4, 3, 8], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... '2023-02-15'])) + >>> ser.resample('MS').sem() + 2023-01-01 0.577350 + 2023-02-01 1.527525 + Freq: MS, dtype: float64 + """ + if numeric_only and self.obj.ndim == 1 and not is_numeric_dtype(self.obj.dtype): + raise TypeError( + f"{type(self).__name__}.sem called with " + f"numeric_only={numeric_only} and dtype {self.obj.dtype}" + ) + return self._cython_agg_general( + "sem", + alt=lambda x: Series(x, copy=False).sem(ddof=ddof), + numeric_only=numeric_only, + ddof=ddof, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def size(self) -> DataFrame | Series: + """ + Compute group sizes. + + Returns + ------- + DataFrame or Series + Number of rows in each group as a Series if as_index is True + or a DataFrame if as_index is False. + %(see_also)s + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([1, 2, 3], index=lst) + >>> ser + a 1 + a 2 + b 3 + dtype: int64 + >>> ser.groupby(level=0).size() + a 2 + b 1 + dtype: int64 + + >>> data = [[1, 2, 3], [1, 5, 6], [7, 8, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["owl", "toucan", "eagle"]) + >>> df + a b c + owl 1 2 3 + toucan 1 5 6 + eagle 7 8 9 + >>> df.groupby("a").size() + a + 1 2 + 7 1 + dtype: int64 + + For Resampler: + + >>> ser = pd.Series([1, 2, 3], index=pd.DatetimeIndex( + ... ['2023-01-01', '2023-01-15', '2023-02-01'])) + >>> ser + 2023-01-01 1 + 2023-01-15 2 + 2023-02-01 3 + dtype: int64 + >>> ser.resample('MS').size() + 2023-01-01 2 + 2023-02-01 1 + Freq: MS, dtype: int64 + """ + result = self._grouper.size() + dtype_backend: None | Literal["pyarrow", "numpy_nullable"] = None + if isinstance(self.obj, Series): + if isinstance(self.obj.array, ArrowExtensionArray): + if isinstance(self.obj.array, ArrowStringArrayNumpySemantics): + dtype_backend = None + elif isinstance(self.obj.array, ArrowStringArray): + dtype_backend = "numpy_nullable" + else: + dtype_backend = "pyarrow" + elif isinstance(self.obj.array, BaseMaskedArray): + dtype_backend = "numpy_nullable" + # TODO: For DataFrames what if columns are mixed arrow/numpy/masked? + + # GH28330 preserve subclassed Series/DataFrames through calls + if isinstance(self.obj, Series): + result = self._obj_1d_constructor(result, name=self.obj.name) + else: + result = self._obj_1d_constructor(result) + + if dtype_backend is not None: + result = result.convert_dtypes( + infer_objects=False, + convert_string=False, + convert_boolean=False, + convert_floating=False, + dtype_backend=dtype_backend, + ) + + with com.temp_setattr(self, "as_index", True): + # size already has the desired behavior in GH#49519, but this makes the + # as_index=False path of _reindex_output fail on categorical groupers. 
+ result = self._reindex_output(result, fill_value=0) + if not self.as_index: + # error: Incompatible types in assignment (expression has + # type "DataFrame", variable has type "Series") + result = result.rename("size").reset_index() # type: ignore[assignment] + return result + + @final + @doc( + _groupby_agg_method_engine_template, + fname="sum", + no=False, + mc=0, + e=None, + ek=None, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).sum() + a 3 + b 7 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").sum() + b c + a + 1 10 7 + 2 11 17""" + ), + ) + def sum( + self, + numeric_only: bool = False, + min_count: int = 0, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_sum + + return self._numba_agg_general( + grouped_sum, + executor.default_dtype_mapping, + engine_kwargs, + min_periods=min_count, + ) + else: + # If we are grouping on categoricals we want unobserved categories to + # return zero, rather than the default of NaN which the reindexing in + # _agg_general() returns. GH #31422 + with com.temp_setattr(self, "observed", True): + result = self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="sum", + npfunc=np.sum, + ) + + return self._reindex_output(result, fill_value=0) + + @final + @doc( + _groupby_agg_method_template, + fname="prod", + no=False, + mc=0, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).prod() + a 2 + b 12 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").prod() + b c + a + 1 16 10 + 2 30 72""" + ), + ) + def prod(self, numeric_only: bool = False, min_count: int = 0) -> NDFrameT: + return self._agg_general( + numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod + ) + + @final + @doc( + _groupby_agg_method_engine_template, + fname="min", + no=False, + mc=-1, + e=None, + ek=None, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).min() + a 1 + b 3 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").min() + b c + a + 1 2 2 + 2 5 8""" + ), + ) + def min( + self, + numeric_only: bool = False, + min_count: int = -1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_min_max + + return self._numba_agg_general( + grouped_min_max, + executor.identity_dtype_mapping, + engine_kwargs, + min_periods=min_count, + is_max=False, + ) + else: + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="min", + npfunc=np.min, + ) + + @final + @doc( + _groupby_agg_method_engine_template, + fname="max", + no=False, + mc=-1, + e=None, + ek=None, + example=dedent( + """\ + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).max() + a 2 + b 4 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tiger", "leopard", "cheetah", "lion"]) + >>> df + a b c + tiger 1 8 2 + leopard 1 2 5 + cheetah 2 5 8 + lion 2 6 9 + >>> df.groupby("a").max() + b c + a + 1 8 5 + 2 6 9""" + ), + ) + def max( + self, + numeric_only: bool = False, + min_count: int = -1, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + from pandas.core._numba.kernels import grouped_min_max + + return self._numba_agg_general( + grouped_min_max, + executor.identity_dtype_mapping, + engine_kwargs, + min_periods=min_count, + is_max=True, + ) + else: + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="max", + npfunc=np.max, + ) + + @final + def first( + self, numeric_only: bool = False, min_count: int = -1, skipna: bool = True + ) -> NDFrameT: + """ + Compute the first entry of each column within each group. + + Defaults to skipping NA elements. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. + min_count : int, default -1 + The required number of valid values to perform the operation. If fewer + than ``min_count`` valid values are present the result will be NA. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + + .. versionadded:: 2.2.1 + + Returns + ------- + Series or DataFrame + First values within each group. + + See Also + -------- + DataFrame.groupby : Apply a function groupby to each row or column of a + DataFrame. + pandas.core.groupby.DataFrameGroupBy.last : Compute the last non-null entry + of each column. + pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. + + Examples + -------- + >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[None, 5, 6], C=[1, 2, 3], + ... 
D=['3/11/2000', '3/12/2000', '3/13/2000'])) + >>> df['D'] = pd.to_datetime(df['D']) + >>> df.groupby("A").first() + B C D + A + 1 5.0 1 2000-03-11 + 3 6.0 3 2000-03-13 + >>> df.groupby("A").first(min_count=2) + B C D + A + 1 NaN 1.0 2000-03-11 + 3 NaN NaN NaT + >>> df.groupby("A").first(numeric_only=True) + B C + A + 1 5.0 1 + 3 6.0 3 + """ + + def first_compat(obj: NDFrameT, axis: AxisInt = 0): + def first(x: Series): + """Helper function for first item that isn't NA.""" + arr = x.array[notna(x.array)] + if not len(arr): + return x.array.dtype.na_value + return arr[0] + + if isinstance(obj, DataFrame): + return obj.apply(first, axis=axis) + elif isinstance(obj, Series): + return first(obj) + else: # pragma: no cover + raise TypeError(type(obj)) + + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="first", + npfunc=first_compat, + skipna=skipna, + ) + + @final + def last( + self, numeric_only: bool = False, min_count: int = -1, skipna: bool = True + ) -> NDFrameT: + """ + Compute the last entry of each column within each group. + + Defaults to skipping NA elements. + + Parameters + ---------- + numeric_only : bool, default False + Include only float, int, boolean columns. If None, will attempt to use + everything, then use only numeric data. + min_count : int, default -1 + The required number of valid values to perform the operation. If fewer + than ``min_count`` valid values are present the result will be NA. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + + .. versionadded:: 2.2.1 + + Returns + ------- + Series or DataFrame + Last of values within each group. + + See Also + -------- + DataFrame.groupby : Apply a function groupby to each row or column of a + DataFrame. + pandas.core.groupby.DataFrameGroupBy.first : Compute the first non-null entry + of each column. + pandas.core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. + + Examples + -------- + >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[5, None, 6], C=[1, 2, 3])) + >>> df.groupby("A").last() + B C + A + 1 5.0 2 + 3 6.0 3 + """ + + def last_compat(obj: NDFrameT, axis: AxisInt = 0): + def last(x: Series): + """Helper function for last item that isn't NA.""" + arr = x.array[notna(x.array)] + if not len(arr): + return x.array.dtype.na_value + return arr[-1] + + if isinstance(obj, DataFrame): + return obj.apply(last, axis=axis) + elif isinstance(obj, Series): + return last(obj) + else: # pragma: no cover + raise TypeError(type(obj)) + + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="last", + npfunc=last_compat, + skipna=skipna, + ) + + @final + def ohlc(self) -> DataFrame: + """ + Compute open, high, low and close values of a group, excluding missing values. + + For multiple groupings, the result index will be a MultiIndex + + Returns + ------- + DataFrame + Open, high, low and close values within each group. + + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC', 'SPX', 'CAC',] + >>> ser = pd.Series([3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 0.1, 0.5], index=lst) + >>> ser + SPX 3.4 + CAC 9.0 + SPX 7.2 + CAC 5.2 + SPX 8.8 + CAC 9.4 + SPX 0.1 + CAC 0.5 + dtype: float64 + >>> ser.groupby(level=0).ohlc() + open high low close + CAC 9.0 9.4 0.5 0.5 + SPX 3.4 8.8 0.1 0.1 + + For DataFrameGroupBy: + + >>> data = {2022: [1.2, 2.3, 8.9, 4.5, 4.4, 3, 2 , 1], + ... 
2023: [3.4, 9.0, 7.2, 5.2, 8.8, 9.4, 8.2, 1.0]} + >>> df = pd.DataFrame(data, index=['SPX', 'CAC', 'SPX', 'CAC', + ... 'SPX', 'CAC', 'SPX', 'CAC']) + >>> df + 2022 2023 + SPX 1.2 3.4 + CAC 2.3 9.0 + SPX 8.9 7.2 + CAC 4.5 5.2 + SPX 4.4 8.8 + CAC 3.0 9.4 + SPX 2.0 8.2 + CAC 1.0 1.0 + >>> df.groupby(level=0).ohlc() + 2022 2023 + open high low close open high low close + CAC 2.3 4.5 1.0 1.0 9.0 9.4 1.0 1.0 + SPX 1.2 8.9 1.2 2.0 3.4 8.8 3.4 8.2 + + For Resampler: + + >>> ser = pd.Series([1, 3, 2, 4, 3, 5], + ... index=pd.DatetimeIndex(['2023-01-01', + ... '2023-01-10', + ... '2023-01-15', + ... '2023-02-01', + ... '2023-02-10', + ... '2023-02-15'])) + >>> ser.resample('MS').ohlc() + open high low close + 2023-01-01 1 3 1 2 + 2023-02-01 4 5 3 5 + """ + if self.obj.ndim == 1: + obj = self._selected_obj + + is_numeric = is_numeric_dtype(obj.dtype) + if not is_numeric: + raise DataError("No numeric types to aggregate") + + res_values = self._grouper._cython_operation( + "aggregate", obj._values, "ohlc", axis=0, min_count=-1 + ) + + agg_names = ["open", "high", "low", "close"] + result = self.obj._constructor_expanddim( + res_values, index=self._grouper.result_index, columns=agg_names + ) + return self._reindex_output(result) + + result = self._apply_to_column_groupbys(lambda sgb: sgb.ohlc()) + return result + + @doc(DataFrame.describe) + def describe( + self, + percentiles=None, + include=None, + exclude=None, + ) -> NDFrameT: + obj = self._obj_with_exclusions + + if len(obj) == 0: + described = obj.describe( + percentiles=percentiles, include=include, exclude=exclude + ) + if obj.ndim == 1: + result = described + else: + result = described.unstack() + return result.to_frame().T.iloc[:0] + + with com.temp_setattr(self, "as_index", True): + result = self._python_apply_general( + lambda x: x.describe( + percentiles=percentiles, include=include, exclude=exclude + ), + obj, + not_indexed_same=True, + ) + if self.axis == 1: + return result.T + + # GH#49256 - properly handle the grouping column(s) + result = result.unstack() + if not self.as_index: + result = self._insert_inaxis_grouper(result) + result.index = default_index(len(result)) + + return result + + @final + def resample(self, rule, *args, include_groups: bool = True, **kwargs) -> Resampler: + """ + Provide resampling when using a TimeGrouper. + + Given a grouper, the function resamples it according to a string + "string" -> "frequency". + + See the :ref:`frequency aliases ` + documentation for more details. + + Parameters + ---------- + rule : str or DateOffset + The offset string or object representing target grouper conversion. + *args + Possible arguments are `how`, `fill_method`, `limit`, `kind` and + `on`, and other arguments of `TimeGrouper`. + include_groups : bool, default True + When True, will attempt to include the groupings in the operation in + the case that they are columns of the DataFrame. If this raises a + TypeError, the result will be computed with the groupings excluded. + When False, the groupings will be excluded when applying ``func``. + + .. versionadded:: 2.2.0 + + .. deprecated:: 2.2.0 + + Setting include_groups to True is deprecated. Only the value + False will be allowed in a future version of pandas. + + **kwargs + Possible arguments are `how`, `fill_method`, `limit`, `kind` and + `on`, and other arguments of `TimeGrouper`. 
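A small sketch of the grouped describe path above (illustrative data): each group's summary is unstacked into a two-level column index of (column, statistic).

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": [1.0, 3.0, 4.0]})

desc = df.groupby("key").describe()
print(desc.columns.nlevels)            # 2
print(desc.loc["a", ("val", "mean")])  # 2.0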
+ + Returns + ------- + pandas.api.typing.DatetimeIndexResamplerGroupby, + pandas.api.typing.PeriodIndexResamplerGroupby, or + pandas.api.typing.TimedeltaIndexResamplerGroupby + Return a new groupby object, with type depending on the data + being resampled. + + See Also + -------- + Grouper : Specify a frequency to resample with when + grouping by a key. + DatetimeIndex.resample : Frequency conversion and resampling of + time series. + + Examples + -------- + >>> idx = pd.date_range('1/1/2000', periods=4, freq='min') + >>> df = pd.DataFrame(data=4 * [range(2)], + ... index=idx, + ... columns=['a', 'b']) + >>> df.iloc[2, 0] = 5 + >>> df + a b + 2000-01-01 00:00:00 0 1 + 2000-01-01 00:01:00 0 1 + 2000-01-01 00:02:00 5 1 + 2000-01-01 00:03:00 0 1 + + Downsample the DataFrame into 3 minute bins and sum the values of + the timestamps falling into a bin. + + >>> df.groupby('a').resample('3min', include_groups=False).sum() + b + a + 0 2000-01-01 00:00:00 2 + 2000-01-01 00:03:00 1 + 5 2000-01-01 00:00:00 1 + + Upsample the series into 30 second bins. + + >>> df.groupby('a').resample('30s', include_groups=False).sum() + b + a + 0 2000-01-01 00:00:00 1 + 2000-01-01 00:00:30 0 + 2000-01-01 00:01:00 1 + 2000-01-01 00:01:30 0 + 2000-01-01 00:02:00 0 + 2000-01-01 00:02:30 0 + 2000-01-01 00:03:00 1 + 5 2000-01-01 00:02:00 1 + + Resample by month. Values are assigned to the month of the period. + + >>> df.groupby('a').resample('ME', include_groups=False).sum() + b + a + 0 2000-01-31 3 + 5 2000-01-31 1 + + Downsample the series into 3 minute bins as above, but close the right + side of the bin interval. + + >>> ( + ... df.groupby('a') + ... .resample('3min', closed='right', include_groups=False) + ... .sum() + ... ) + b + a + 0 1999-12-31 23:57:00 1 + 2000-01-01 00:00:00 2 + 5 2000-01-01 00:00:00 1 + + Downsample the series into 3 minute bins and close the right side of + the bin interval, but label each bin using the right edge instead of + the left. + + >>> ( + ... df.groupby('a') + ... .resample('3min', closed='right', label='right', include_groups=False) + ... .sum() + ... ) + b + a + 0 2000-01-01 00:00:00 1 + 2000-01-01 00:03:00 2 + 5 2000-01-01 00:03:00 1 + """ + from pandas.core.resample import get_resampler_for_grouping + + # mypy flags that include_groups could be specified via `*args` or `**kwargs` + # GH#54961 would resolve. + return get_resampler_for_grouping( # type: ignore[misc] + self, rule, *args, include_groups=include_groups, **kwargs + ) + + @final + def rolling(self, *args, **kwargs) -> RollingGroupby: + """ + Return a rolling grouper, providing rolling functionality per group. + + Parameters + ---------- + window : int, timedelta, str, offset, or BaseIndexer subclass + Size of the moving window. + + If an integer, the fixed number of observations used for + each window. + + If a timedelta, str, or offset, the time period of each window. Each + window will be a variable sized based on the observations included in + the time-period. This is only valid for datetimelike indexes. + To learn more about the offsets & frequency strings, please see `this link + `__. + + If a BaseIndexer subclass, the window boundaries + based on the defined ``get_window_bounds`` method. Additional rolling + keyword arguments, namely ``min_periods``, ``center``, ``closed`` and + ``step`` will be passed to ``get_window_bounds``. + + min_periods : int, default None + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. 
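The offset-based form of ``window`` described above can be sketched as follows; this assumes the grouped object has a monotonic DatetimeIndex, and the data are illustrative.

import pandas as pd

idx = pd.date_range("2024-01-01", periods=4, freq="D")
df = pd.DataFrame({"grp": ["a", "a", "b", "b"], "x": [1.0, 2.0, 3.0, 4.0]}, index=idx)

# Each window covers up to two days of observations within its own group.
out = df.groupby("grp").rolling("2D").sum()
print(out["x"].tolist())  # [1.0, 3.0, 3.0, 7.0]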
+ + For a window that is specified by an offset, + ``min_periods`` will default to 1. + + For a window that is specified by an integer, ``min_periods`` will default + to the size of the window. + + center : bool, default False + If False, set the window labels as the right edge of the window index. + + If True, set the window labels as the center of the window index. + + win_type : str, default None + If ``None``, all points are evenly weighted. + + If a string, it must be a valid `scipy.signal window function + `__. + + Certain Scipy window types require additional parameters to be passed + in the aggregation function. The additional parameters must match + the keywords specified in the Scipy window type method signature. + + on : str, optional + For a DataFrame, a column label or Index level on which + to calculate the rolling window, rather than the DataFrame's index. + + Provided integer column is ignored and excluded from result since + an integer index is not used to calculate the rolling window. + + axis : int or str, default 0 + If ``0`` or ``'index'``, roll across the rows. + + If ``1`` or ``'columns'``, roll across the columns. + + For `Series` this parameter is unused and defaults to 0. + + closed : str, default None + If ``'right'``, the first point in the window is excluded from calculations. + + If ``'left'``, the last point in the window is excluded from calculations. + + If ``'both'``, no points in the window are excluded from calculations. + + If ``'neither'``, the first and last points in the window are excluded + from calculations. + + Default ``None`` (``'right'``). + + method : str {'single', 'table'}, default 'single' + Execute the rolling operation per single column or row (``'single'``) + or over the entire object (``'table'``). + + This argument is only implemented when specifying ``engine='numba'`` + in the method call. + + Returns + ------- + pandas.api.typing.RollingGroupby + Return a new grouper with our rolling appended. + + See Also + -------- + Series.rolling : Calling object with Series data. + DataFrame.rolling : Calling object with DataFrames. + Series.groupby : Apply a function groupby to a Series. + DataFrame.groupby : Apply a function groupby. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 2, 2], + ... 'B': [1, 2, 3, 4], + ... 'C': [0.362, 0.227, 1.267, -0.562]}) + >>> df + A B C + 0 1 1 0.362 + 1 1 2 0.227 + 2 2 3 1.267 + 3 2 4 -0.562 + + >>> df.groupby('A').rolling(2).sum() + B C + A + 1 0 NaN NaN + 1 3.0 0.589 + 2 2 NaN NaN + 3 7.0 0.705 + + >>> df.groupby('A').rolling(2, min_periods=1).sum() + B C + A + 1 0 1.0 0.362 + 1 3.0 0.589 + 2 2 3.0 1.267 + 3 7.0 0.705 + + >>> df.groupby('A').rolling(2, on='B').sum() + B C + A + 1 0 1 NaN + 1 2 0.589 + 2 2 3 NaN + 3 4 0.705 + """ + from pandas.core.window import RollingGroupby + + return RollingGroupby( + self._selected_obj, + *args, + _grouper=self._grouper, + _as_index=self.as_index, + **kwargs, + ) + + @final + @Substitution(name="groupby") + @Appender(_common_see_also) + def expanding(self, *args, **kwargs) -> ExpandingGroupby: + """ + Return an expanding grouper, providing expanding + functionality per group. 
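A brief sketch of the per-group expanding window returned here (illustrative data): each group accumulates from its own first row rather than from the start of the frame.

import pandas as pd

df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]})

out = df.groupby("A").expanding().sum()
print(out["B"].tolist())  # [1.0, 3.0, 3.0, 7.0]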
+ + Returns + ------- + pandas.api.typing.ExpandingGroupby + """ + from pandas.core.window import ExpandingGroupby + + return ExpandingGroupby( + self._selected_obj, + *args, + _grouper=self._grouper, + **kwargs, + ) + + @final + @Substitution(name="groupby") + @Appender(_common_see_also) + def ewm(self, *args, **kwargs) -> ExponentialMovingWindowGroupby: + """ + Return an ewm grouper, providing ewm functionality per group. + + Returns + ------- + pandas.api.typing.ExponentialMovingWindowGroupby + """ + from pandas.core.window import ExponentialMovingWindowGroupby + + return ExponentialMovingWindowGroupby( + self._selected_obj, + *args, + _grouper=self._grouper, + **kwargs, + ) + + @final + def _fill(self, direction: Literal["ffill", "bfill"], limit: int | None = None): + """ + Shared function for `pad` and `backfill` to call Cython method. + + Parameters + ---------- + direction : {'ffill', 'bfill'} + Direction passed to underlying Cython function. `bfill` will cause + values to be filled backwards. `ffill` and any other values will + default to a forward fill + limit : int, default None + Maximum number of consecutive values to fill. If `None`, this + method will convert to -1 prior to passing to Cython + + Returns + ------- + `Series` or `DataFrame` with filled values + + See Also + -------- + pad : Returns Series with minimum number of char in object. + backfill : Backward fill the missing values in the dataset. + """ + # Need int value for Cython + if limit is None: + limit = -1 + + ids, _, _ = self._grouper.group_info + sorted_labels = np.argsort(ids, kind="mergesort").astype(np.intp, copy=False) + if direction == "bfill": + sorted_labels = sorted_labels[::-1] + + col_func = partial( + libgroupby.group_fillna_indexer, + labels=ids, + sorted_labels=sorted_labels, + limit=limit, + dropna=self.dropna, + ) + + def blk_func(values: ArrayLike) -> ArrayLike: + mask = isna(values) + if values.ndim == 1: + indexer = np.empty(values.shape, dtype=np.intp) + col_func(out=indexer, mask=mask) + return algorithms.take_nd(values, indexer) + + else: + # We broadcast algorithms.take_nd analogous to + # np.take_along_axis + if isinstance(values, np.ndarray): + dtype = values.dtype + if self._grouper.has_dropped_na: + # dropped null groups give rise to nan in the result + dtype = ensure_dtype_can_hold_na(values.dtype) + out = np.empty(values.shape, dtype=dtype) + else: + # Note: we only get here with backfill/pad, + # so if we have a dtype that cannot hold NAs, + # then there will be no -1s in indexer, so we can use + # the original dtype (no need to ensure_dtype_can_hold_na) + out = type(values)._empty(values.shape, dtype=values.dtype) + + for i, value_element in enumerate(values): + # call group_fillna_indexer column-wise + indexer = np.empty(values.shape[1], dtype=np.intp) + col_func(out=indexer, mask=mask[i]) + out[i, :] = algorithms.take_nd(value_element, indexer) + return out + + mgr = self._get_data_to_aggregate() + res_mgr = mgr.apply(blk_func) + + new_obj = self._wrap_agged_manager(res_mgr) + + if self.axis == 1: + # Only relevant for DataFrameGroupBy + new_obj = new_obj.T + new_obj.columns = self.obj.columns + + new_obj.index = self.obj.index + return new_obj + + @final + @Substitution(name="groupby") + def ffill(self, limit: int | None = None): + """ + Forward fill the values. + + Parameters + ---------- + limit : int, optional + Limit of how many values to fill. + + Returns + ------- + Series or DataFrame + Object with missing values filled. 
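A compact sketch of the per-group exponentially weighted window exposed by ewm above; the data are illustrative, and com=0.5 corresponds to alpha = 2/3.

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b", "b"], "x": [1.0, 2.0, 3.0, 5.0]})

out = df.groupby("key")["x"].ewm(com=0.5).mean()
# Second row of group "a": (2 + (1/3) * 1) / (1 + 1/3) = 1.75
print(round(out.iloc[1], 4))  # 1.75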
+ + See Also + -------- + Series.ffill: Returns Series with minimum number of char in object. + DataFrame.ffill: Object with missing values filled or None if inplace=True. + Series.fillna: Fill NaN values of a Series. + DataFrame.fillna: Fill NaN values of a DataFrame. + + Examples + -------- + + For SeriesGroupBy: + + >>> key = [0, 0, 1, 1] + >>> ser = pd.Series([np.nan, 2, 3, np.nan], index=key) + >>> ser + 0 NaN + 0 2.0 + 1 3.0 + 1 NaN + dtype: float64 + >>> ser.groupby(level=0).ffill() + 0 NaN + 0 2.0 + 1 3.0 + 1 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> df = pd.DataFrame( + ... { + ... "key": [0, 0, 1, 1, 1], + ... "A": [np.nan, 2, np.nan, 3, np.nan], + ... "B": [2, 3, np.nan, np.nan, np.nan], + ... "C": [np.nan, np.nan, 2, np.nan, np.nan], + ... } + ... ) + >>> df + key A B C + 0 0 NaN 2.0 NaN + 1 0 2.0 3.0 NaN + 2 1 NaN NaN 2.0 + 3 1 3.0 NaN NaN + 4 1 NaN NaN NaN + + Propagate non-null values forward or backward within each group along columns. + + >>> df.groupby("key").ffill() + A B C + 0 NaN 2.0 NaN + 1 2.0 3.0 NaN + 2 NaN NaN 2.0 + 3 3.0 NaN 2.0 + 4 3.0 NaN 2.0 + + Propagate non-null values forward or backward within each group along rows. + + >>> df.T.groupby(np.array([0, 0, 1, 1])).ffill().T + key A B C + 0 0.0 0.0 2.0 2.0 + 1 0.0 2.0 3.0 3.0 + 2 1.0 1.0 NaN 2.0 + 3 1.0 3.0 NaN NaN + 4 1.0 1.0 NaN NaN + + Only replace the first NaN element within a group along rows. + + >>> df.groupby("key").ffill(limit=1) + A B C + 0 NaN 2.0 NaN + 1 2.0 3.0 NaN + 2 NaN NaN 2.0 + 3 3.0 NaN 2.0 + 4 3.0 NaN NaN + """ + return self._fill("ffill", limit=limit) + + @final + @Substitution(name="groupby") + def bfill(self, limit: int | None = None): + """ + Backward fill the values. + + Parameters + ---------- + limit : int, optional + Limit of how many values to fill. + + Returns + ------- + Series or DataFrame + Object with missing values filled. + + See Also + -------- + Series.bfill : Backward fill the missing values in the dataset. + DataFrame.bfill: Backward fill the missing values in the dataset. + Series.fillna: Fill NaN values of a Series. + DataFrame.fillna: Fill NaN values of a DataFrame. + + Examples + -------- + + With Series: + + >>> index = ['Falcon', 'Falcon', 'Parrot', 'Parrot', 'Parrot'] + >>> s = pd.Series([None, 1, None, None, 3], index=index) + >>> s + Falcon NaN + Falcon 1.0 + Parrot NaN + Parrot NaN + Parrot 3.0 + dtype: float64 + >>> s.groupby(level=0).bfill() + Falcon 1.0 + Falcon 1.0 + Parrot 3.0 + Parrot 3.0 + Parrot 3.0 + dtype: float64 + >>> s.groupby(level=0).bfill(limit=1) + Falcon 1.0 + Falcon 1.0 + Parrot NaN + Parrot 3.0 + Parrot 3.0 + dtype: float64 + + With DataFrame: + + >>> df = pd.DataFrame({'A': [1, None, None, None, 4], + ... 'B': [None, None, 5, None, 7]}, index=index) + >>> df + A B + Falcon 1.0 NaN + Falcon NaN NaN + Parrot NaN 5.0 + Parrot NaN NaN + Parrot 4.0 7.0 + >>> df.groupby(level=0).bfill() + A B + Falcon 1.0 NaN + Falcon NaN NaN + Parrot 4.0 5.0 + Parrot 4.0 7.0 + Parrot 4.0 7.0 + >>> df.groupby(level=0).bfill(limit=1) + A B + Falcon 1.0 NaN + Falcon NaN NaN + Parrot NaN 5.0 + Parrot 4.0 7.0 + Parrot 4.0 7.0 + """ + return self._fill("bfill", limit=limit) + + @final + @property + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def nth(self) -> GroupByNthSelector: + """ + Take the nth row from each group if n is an int, otherwise a subset of rows. + + Can be either a call or an index. dropna is not available with index notation. + Index notation accepts a comma separated list of integers and slices. 
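As a short, illustrative contrast with the first/last reductions covered earlier: nth is a row selection that keeps the original index and any NA values, while first skips nulls column-wise and indexes the result by the group keys.

import numpy as np
import pandas as pd

df = pd.DataFrame({"A": [1, 1, 2], "B": [np.nan, 2.0, 3.0]})
g = df.groupby("A")

print(g.nth(0)["B"].tolist())   # [nan, 3.0] -- original rows, NaN retained
print(g.first()["B"].tolist())  # [2.0, 3.0] -- first non-null per column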
+ + If dropna, will take the nth non-null row, dropna is either + 'all' or 'any'; this is equivalent to calling dropna(how=dropna) + before the groupby. + + Parameters + ---------- + n : int, slice or list of ints and slices + A single nth value for the row or a list of nth values or slices. + + .. versionchanged:: 1.4.0 + Added slice and lists containing slices. + Added index notation. + + dropna : {'any', 'all', None}, default None + Apply the specified dropna operation before counting which row is + the nth row. Only supported if n is an int. + + Returns + ------- + Series or DataFrame + N-th value within each group. + %(see_also)s + Examples + -------- + + >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2], + ... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B']) + >>> g = df.groupby('A') + >>> g.nth(0) + A B + 0 1 NaN + 2 2 3.0 + >>> g.nth(1) + A B + 1 1 2.0 + 4 2 5.0 + >>> g.nth(-1) + A B + 3 1 4.0 + 4 2 5.0 + >>> g.nth([0, 1]) + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + 4 2 5.0 + >>> g.nth(slice(None, -1)) + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + + Index notation may also be used + + >>> g.nth[0, 1] + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + 4 2 5.0 + >>> g.nth[:-1] + A B + 0 1 NaN + 1 1 2.0 + 2 2 3.0 + + Specifying `dropna` allows ignoring ``NaN`` values + + >>> g.nth(0, dropna='any') + A B + 1 1 2.0 + 2 2 3.0 + + When the specified ``n`` is larger than any of the groups, an + empty DataFrame is returned + + >>> g.nth(3, dropna='any') + Empty DataFrame + Columns: [A, B] + Index: [] + """ + return GroupByNthSelector(self) + + def _nth( + self, + n: PositionalIndexer | tuple, + dropna: Literal["any", "all", None] = None, + ) -> NDFrameT: + if not dropna: + mask = self._make_mask_from_positional_indexer(n) + + ids, _, _ = self._grouper.group_info + + # Drop NA values in grouping + mask = mask & (ids != -1) + + out = self._mask_selected_obj(mask) + return out + + # dropna is truthy + if not is_integer(n): + raise ValueError("dropna option only supported for an integer argument") + + if dropna not in ["any", "all"]: + # Note: when agg-ing picker doesn't raise this, just returns NaN + raise ValueError( + "For a DataFrame or Series groupby.nth, dropna must be " + "either None, 'any' or 'all', " + f"(was passed {dropna})." + ) + + # old behaviour, but with all and any support for DataFrames. + # modified in GH 7559 to have better perf + n = cast(int, n) + dropped = self._selected_obj.dropna(how=dropna, axis=self.axis) + + # get a new grouper for our dropped obj + grouper: np.ndarray | Index | ops.BaseGrouper + if len(dropped) == len(self._selected_obj): + # Nothing was dropped, can use the same grouper + grouper = self._grouper + else: + # we don't have the grouper info available + # (e.g. 
we have selected out + # a column that is not in the current object) + axis = self._grouper.axis + grouper = self._grouper.codes_info[axis.isin(dropped.index)] + if self._grouper.has_dropped_na: + # Null groups need to still be encoded as -1 when passed to groupby + nulls = grouper == -1 + # error: No overload variant of "where" matches argument types + # "Any", "NAType", "Any" + values = np.where(nulls, NA, grouper) # type: ignore[call-overload] + grouper = Index(values, dtype="Int64") + + if self.axis == 1: + grb = dropped.T.groupby(grouper, as_index=self.as_index, sort=self.sort) + else: + grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort) + return grb.nth(n) + + @final + def quantile( + self, + q: float | AnyArrayLike = 0.5, + interpolation: str = "linear", + numeric_only: bool = False, + ): + """ + Return group values at the given quantile, a la numpy.percentile. + + Parameters + ---------- + q : float or array-like, default 0.5 (50% quantile) + Value(s) between 0 and 1 providing the quantile(s) to compute. + interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} + Method to use when the desired quantile falls between two points. + numeric_only : bool, default False + Include only `float`, `int` or `boolean` data. + + .. versionadded:: 1.5.0 + + .. versionchanged:: 2.0.0 + + numeric_only now defaults to ``False``. + + Returns + ------- + Series or DataFrame + Return type determined by caller of GroupBy object. + + See Also + -------- + Series.quantile : Similar method for Series. + DataFrame.quantile : Similar method for DataFrame. + numpy.percentile : NumPy method to compute qth percentile. + + Examples + -------- + >>> df = pd.DataFrame([ + ... ['a', 1], ['a', 2], ['a', 3], + ... ['b', 1], ['b', 3], ['b', 5] + ... ], columns=['key', 'val']) + >>> df.groupby('key').quantile() + val + key + a 2.0 + b 3.0 + """ + mgr = self._get_data_to_aggregate(numeric_only=numeric_only, name="quantile") + obj = self._wrap_agged_manager(mgr) + if self.axis == 1: + splitter = self._grouper._get_splitter(obj.T, axis=self.axis) + sdata = splitter._sorted_data.T + else: + splitter = self._grouper._get_splitter(obj, axis=self.axis) + sdata = splitter._sorted_data + + starts, ends = lib.generate_slices(splitter._slabels, splitter.ngroups) + + def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, DtypeObj | None]: + if is_object_dtype(vals.dtype): + raise TypeError( + "'quantile' cannot be performed against 'object' dtypes!" + ) + + inference: DtypeObj | None = None + if isinstance(vals, BaseMaskedArray) and is_numeric_dtype(vals.dtype): + out = vals.to_numpy(dtype=float, na_value=np.nan) + inference = vals.dtype + elif is_integer_dtype(vals.dtype): + if isinstance(vals, ExtensionArray): + out = vals.to_numpy(dtype=float, na_value=np.nan) + else: + out = vals + inference = np.dtype(np.int64) + elif is_bool_dtype(vals.dtype) and isinstance(vals, ExtensionArray): + out = vals.to_numpy(dtype=float, na_value=np.nan) + elif is_bool_dtype(vals.dtype): + # GH#51424 deprecate to match Series/DataFrame behavior + warnings.warn( + f"Allowing bool dtype in {type(self).__name__}.quantile is " + "deprecated and will raise in a future version, matching " + "the Series/DataFrame behavior. Cast to uint8 dtype before " + "calling quantile instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + out = np.asarray(vals) + elif needs_i8_conversion(vals.dtype): + inference = vals.dtype + # In this case we need to delay the casting until after the + # np.lexsort below. 
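A quick illustration of the dtype handling in ``pre_processor`` above and ``post_processor`` just below (a sketch; the comments describe the result rather than quoting exact repr): nullable integer input is converted to a float ndarray with NaN for the masked entries, and with the default ``'linear'`` interpolation the result is wrapped back into a masked ``FloatingArray``:

>>> df = pd.DataFrame({"key": [1, 1, 2, 2],
...                    "val": pd.array([1, 2, 3, 4], dtype="Int64")})
>>> df.groupby("key")["val"].quantile()
# group medians 1.5 and 3.5, returned with the nullable Float64 dtype
>>> df.groupby("key")["val"].quantile(q=[0.25, 0.75])
# one row per (key, quantile) pair; the quantile values become the
# innermost index level (see _insert_quantile_level further down)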
+ # error: Incompatible return value type (got + # "Tuple[Union[ExtensionArray, ndarray[Any, Any]], Union[Any, + # ExtensionDtype]]", expected "Tuple[ndarray[Any, Any], + # Optional[Union[dtype[Any], ExtensionDtype]]]") + return vals, inference # type: ignore[return-value] + elif isinstance(vals, ExtensionArray) and is_float_dtype(vals.dtype): + inference = np.dtype(np.float64) + out = vals.to_numpy(dtype=float, na_value=np.nan) + else: + out = np.asarray(vals) + + return out, inference + + def post_processor( + vals: np.ndarray, + inference: DtypeObj | None, + result_mask: np.ndarray | None, + orig_vals: ArrayLike, + ) -> ArrayLike: + if inference: + # Check for edge case + if isinstance(orig_vals, BaseMaskedArray): + assert result_mask is not None # for mypy + + if interpolation in {"linear", "midpoint"} and not is_float_dtype( + orig_vals + ): + return FloatingArray(vals, result_mask) + else: + # Item "ExtensionDtype" of "Union[ExtensionDtype, str, + # dtype[Any], Type[object]]" has no attribute "numpy_dtype" + # [union-attr] + with warnings.catch_warnings(): + # vals.astype with nan can warn with numpy >1.24 + warnings.filterwarnings("ignore", category=RuntimeWarning) + return type(orig_vals)( + vals.astype( + inference.numpy_dtype # type: ignore[union-attr] + ), + result_mask, + ) + + elif not ( + is_integer_dtype(inference) + and interpolation in {"linear", "midpoint"} + ): + if needs_i8_conversion(inference): + # error: Item "ExtensionArray" of "Union[ExtensionArray, + # ndarray[Any, Any]]" has no attribute "_ndarray" + vals = vals.astype("i8").view( + orig_vals._ndarray.dtype # type: ignore[union-attr] + ) + # error: Item "ExtensionArray" of "Union[ExtensionArray, + # ndarray[Any, Any]]" has no attribute "_from_backing_data" + return orig_vals._from_backing_data( # type: ignore[union-attr] + vals + ) + + assert isinstance(inference, np.dtype) # for mypy + return vals.astype(inference) + + return vals + + qs = np.array(q, dtype=np.float64) + pass_qs: np.ndarray | None = qs + if is_scalar(q): + qs = np.array([q], dtype=np.float64) + pass_qs = None + + ids, _, ngroups = self._grouper.group_info + nqs = len(qs) + + func = partial( + libgroupby.group_quantile, + labels=ids, + qs=qs, + interpolation=interpolation, + starts=starts, + ends=ends, + ) + + def blk_func(values: ArrayLike) -> ArrayLike: + orig_vals = values + if isinstance(values, BaseMaskedArray): + mask = values._mask + result_mask = np.zeros((ngroups, nqs), dtype=np.bool_) + else: + mask = isna(values) + result_mask = None + + is_datetimelike = needs_i8_conversion(values.dtype) + + vals, inference = pre_processor(values) + + ncols = 1 + if vals.ndim == 2: + ncols = vals.shape[0] + + out = np.empty((ncols, ngroups, nqs), dtype=np.float64) + + if is_datetimelike: + vals = vals.view("i8") + + if vals.ndim == 1: + # EA is always 1d + func( + out[0], + values=vals, + mask=mask, + result_mask=result_mask, + is_datetimelike=is_datetimelike, + ) + else: + for i in range(ncols): + func( + out[i], + values=vals[i], + mask=mask[i], + result_mask=None, + is_datetimelike=is_datetimelike, + ) + + if vals.ndim == 1: + out = out.ravel("K") + if result_mask is not None: + result_mask = result_mask.ravel("K") + else: + out = out.reshape(ncols, ngroups * nqs) + + return post_processor(out, inference, result_mask, orig_vals) + + res_mgr = sdata._mgr.grouped_reduce(blk_func) + + res = self._wrap_agged_manager(res_mgr) + return self._wrap_aggregated_output(res, qs=pass_qs) + + @final + @Substitution(name="groupby") + def ngroup(self, ascending: 
bool = True): + """ + Number each group from 0 to the number of groups - 1. + + This is the enumerative complement of cumcount. Note that the + numbers given to the groups match the order in which the groups + would be seen when iterating over the groupby object, not the + order they are first observed. + + Groups with missing keys (where `pd.isna()` is True) will be labeled with `NaN` + and will be skipped from the count. + + Parameters + ---------- + ascending : bool, default True + If False, number in reverse, from number of group - 1 to 0. + + Returns + ------- + Series + Unique numbers for each group. + + See Also + -------- + .cumcount : Number the rows in each group. + + Examples + -------- + >>> df = pd.DataFrame({"color": ["red", None, "red", "blue", "blue", "red"]}) + >>> df + color + 0 red + 1 None + 2 red + 3 blue + 4 blue + 5 red + >>> df.groupby("color").ngroup() + 0 1.0 + 1 NaN + 2 1.0 + 3 0.0 + 4 0.0 + 5 1.0 + dtype: float64 + >>> df.groupby("color", dropna=False).ngroup() + 0 1 + 1 2 + 2 1 + 3 0 + 4 0 + 5 1 + dtype: int64 + >>> df.groupby("color", dropna=False).ngroup(ascending=False) + 0 1 + 1 0 + 2 1 + 3 2 + 4 2 + 5 1 + dtype: int64 + """ + obj = self._obj_with_exclusions + index = obj._get_axis(self.axis) + comp_ids = self._grouper.group_info[0] + + dtype: type + if self._grouper.has_dropped_na: + comp_ids = np.where(comp_ids == -1, np.nan, comp_ids) + dtype = np.float64 + else: + dtype = np.int64 + + if any(ping._passed_categorical for ping in self._grouper.groupings): + # comp_ids reflect non-observed groups, we need only observed + comp_ids = rank_1d(comp_ids, ties_method="dense") - 1 + + result = self._obj_1d_constructor(comp_ids, index, dtype=dtype) + if not ascending: + result = self.ngroups - 1 - result + return result + + @final + @Substitution(name="groupby") + def cumcount(self, ascending: bool = True): + """ + Number each item in each group from 0 to the length of that group - 1. + + Essentially this is equivalent to + + .. code-block:: python + + self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) + + Parameters + ---------- + ascending : bool, default True + If False, number in reverse, from length of group - 1 to 0. + + Returns + ------- + Series + Sequence number of each element within each group. + + See Also + -------- + .ngroup : Number the groups themselves. + + Examples + -------- + >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']], + ... columns=['A']) + >>> df + A + 0 a + 1 a + 2 a + 3 b + 4 b + 5 a + >>> df.groupby('A').cumcount() + 0 0 + 1 1 + 2 2 + 3 0 + 4 1 + 5 3 + dtype: int64 + >>> df.groupby('A').cumcount(ascending=False) + 0 3 + 1 2 + 2 1 + 3 1 + 4 0 + 5 0 + dtype: int64 + """ + index = self._obj_with_exclusions._get_axis(self.axis) + cumcounts = self._cumcount_array(ascending=ascending) + return self._obj_1d_constructor(cumcounts, index) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def rank( + self, + method: str = "average", + ascending: bool = True, + na_option: str = "keep", + pct: bool = False, + axis: AxisInt | lib.NoDefault = lib.no_default, + ) -> NDFrameT: + """ + Provide the rank of values within each group. + + Parameters + ---------- + method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' + * average: average rank of group. + * min: lowest rank in group. + * max: highest rank in group. + * first: ranks assigned in order they appear in the array. + * dense: like 'min', but rank always increases by 1 between groups. 
+ ascending : bool, default True + False for ranks by high (1) to low (N). + na_option : {'keep', 'top', 'bottom'}, default 'keep' + * keep: leave NA values where they are. + * top: smallest rank if ascending. + * bottom: smallest rank if descending. + pct : bool, default False + Compute percentage rank of data within each group. + axis : int, default 0 + The axis of the object over which to compute the rank. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + Returns + ------- + DataFrame with ranking of values within each group + %(see_also)s + Examples + -------- + >>> df = pd.DataFrame( + ... { + ... "group": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], + ... "value": [2, 4, 2, 3, 5, 1, 2, 4, 1, 5], + ... } + ... ) + >>> df + group value + 0 a 2 + 1 a 4 + 2 a 2 + 3 a 3 + 4 a 5 + 5 b 1 + 6 b 2 + 7 b 4 + 8 b 1 + 9 b 5 + >>> for method in ['average', 'min', 'max', 'dense', 'first']: + ... df[f'{method}_rank'] = df.groupby('group')['value'].rank(method) + >>> df + group value average_rank min_rank max_rank dense_rank first_rank + 0 a 2 1.5 1.0 2.0 1.0 1.0 + 1 a 4 4.0 4.0 4.0 3.0 4.0 + 2 a 2 1.5 1.0 2.0 1.0 2.0 + 3 a 3 3.0 3.0 3.0 2.0 3.0 + 4 a 5 5.0 5.0 5.0 4.0 5.0 + 5 b 1 1.5 1.0 2.0 1.0 1.0 + 6 b 2 3.0 3.0 3.0 2.0 3.0 + 7 b 4 4.0 4.0 4.0 3.0 4.0 + 8 b 1 1.5 1.0 2.0 1.0 2.0 + 9 b 5 5.0 5.0 5.0 4.0 5.0 + """ + if na_option not in {"keep", "top", "bottom"}: + msg = "na_option must be one of 'keep', 'top', or 'bottom'" + raise ValueError(msg) + + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "rank") + else: + axis = 0 + + kwargs = { + "ties_method": method, + "ascending": ascending, + "na_option": na_option, + "pct": pct, + } + if axis != 0: + # DataFrame uses different keyword name + kwargs["method"] = kwargs.pop("ties_method") + f = lambda x: x.rank(axis=axis, numeric_only=False, **kwargs) + result = self._python_apply_general( + f, self._selected_obj, is_transform=True + ) + return result + + return self._cython_transform( + "rank", + numeric_only=False, + axis=axis, + **kwargs, + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cumprod( + self, axis: Axis | lib.NoDefault = lib.no_default, *args, **kwargs + ) -> NDFrameT: + """ + Cumulative product for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([6, 2, 0], index=lst) + >>> ser + a 6 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).cumprod() + a 6 + a 12 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["cow", "horse", "bull"]) + >>> df + a b c + cow 1 8 2 + horse 1 2 5 + bull 2 6 9 + >>> df.groupby("a").groups + {1: ['cow', 'horse'], 2: ['bull']} + >>> df.groupby("a").cumprod() + b c + cow 8 2 + horse 16 10 + bull 6 9 + """ + nv.validate_groupby_func("cumprod", args, kwargs, ["numeric_only", "skipna"]) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cumprod") + else: + axis = 0 + + if axis != 0: + f = lambda x: x.cumprod(axis=axis, **kwargs) + return self._python_apply_general(f, self._selected_obj, is_transform=True) + + return self._cython_transform("cumprod", **kwargs) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cumsum( + self, axis: Axis | lib.NoDefault = lib.no_default, *args, **kwargs + ) -> NDFrameT: + """ + Cumulative sum for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b'] + >>> ser = pd.Series([6, 2, 0], index=lst) + >>> ser + a 6 + a 2 + b 0 + dtype: int64 + >>> ser.groupby(level=0).cumsum() + a 6 + a 8 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 2, 5], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["fox", "gorilla", "lion"]) + >>> df + a b c + fox 1 8 2 + gorilla 1 2 5 + lion 2 6 9 + >>> df.groupby("a").groups + {1: ['fox', 'gorilla'], 2: ['lion']} + >>> df.groupby("a").cumsum() + b c + fox 8 2 + gorilla 10 7 + lion 6 9 + """ + nv.validate_groupby_func("cumsum", args, kwargs, ["numeric_only", "skipna"]) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cumsum") + else: + axis = 0 + + if axis != 0: + f = lambda x: x.cumsum(axis=axis, **kwargs) + return self._python_apply_general(f, self._selected_obj, is_transform=True) + + return self._cython_transform("cumsum", **kwargs) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cummin( + self, + axis: AxisInt | lib.NoDefault = lib.no_default, + numeric_only: bool = False, + **kwargs, + ) -> NDFrameT: + """ + Cumulative min for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 6, 2, 3, 0, 4], index=lst) + >>> ser + a 1 + a 6 + a 2 + b 3 + b 0 + b 4 + dtype: int64 + >>> ser.groupby(level=0).cummin() + a 1 + a 1 + a 1 + b 3 + b 0 + b 0 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 0, 2], [1, 1, 5], [6, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... 
index=["snake", "rabbit", "turtle"]) + >>> df + a b c + snake 1 0 2 + rabbit 1 1 5 + turtle 6 6 9 + >>> df.groupby("a").groups + {1: ['snake', 'rabbit'], 6: ['turtle']} + >>> df.groupby("a").cummin() + b c + snake 0 2 + rabbit 0 2 + turtle 6 9 + """ + skipna = kwargs.get("skipna", True) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cummin") + else: + axis = 0 + + if axis != 0: + f = lambda x: np.minimum.accumulate(x, axis) + obj = self._selected_obj + if numeric_only: + obj = obj._get_numeric_data() + return self._python_apply_general(f, obj, is_transform=True) + + return self._cython_transform( + "cummin", numeric_only=numeric_only, skipna=skipna + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def cummax( + self, + axis: AxisInt | lib.NoDefault = lib.no_default, + numeric_only: bool = False, + **kwargs, + ) -> NDFrameT: + """ + Cumulative max for each group. + + Returns + ------- + Series or DataFrame + %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 6, 2, 3, 1, 4], index=lst) + >>> ser + a 1 + a 6 + a 2 + b 3 + b 1 + b 4 + dtype: int64 + >>> ser.groupby(level=0).cummax() + a 1 + a 6 + a 6 + b 3 + b 3 + b 4 + dtype: int64 + + For DataFrameGroupBy: + + >>> data = [[1, 8, 2], [1, 1, 0], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["cow", "horse", "bull"]) + >>> df + a b c + cow 1 8 2 + horse 1 1 0 + bull 2 6 9 + >>> df.groupby("a").groups + {1: ['cow', 'horse'], 2: ['bull']} + >>> df.groupby("a").cummax() + b c + cow 8 2 + horse 8 2 + bull 6 9 + """ + skipna = kwargs.get("skipna", True) + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "cummax") + else: + axis = 0 + + if axis != 0: + f = lambda x: np.maximum.accumulate(x, axis) + obj = self._selected_obj + if numeric_only: + obj = obj._get_numeric_data() + return self._python_apply_general(f, obj, is_transform=True) + + return self._cython_transform( + "cummax", numeric_only=numeric_only, skipna=skipna + ) + + @final + @Substitution(name="groupby") + def shift( + self, + periods: int | Sequence[int] = 1, + freq=None, + axis: Axis | lib.NoDefault = lib.no_default, + fill_value=lib.no_default, + suffix: str | None = None, + ): + """ + Shift each group by periods observations. + + If freq is passed, the index will be increased using the periods and the freq. + + Parameters + ---------- + periods : int | Sequence[int], default 1 + Number of periods to shift. If a list of values, shift each group by + each period. + freq : str, optional + Frequency string. + axis : axis to shift, default 0 + Shift direction. + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + fill_value : optional + The scalar value to use for newly introduced missing values. + + .. versionchanged:: 2.1.0 + Will raise a ``ValueError`` if ``freq`` is provided too. + + suffix : str, optional + A string to add to each shifted column if there are multiple periods. + Ignored otherwise. + + Returns + ------- + Series or DataFrame + Object shifted within each group. + + See Also + -------- + Index.shift : Shift values of Index. 
+ + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).shift(1) + a NaN + a 1.0 + b NaN + b 3.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tuna", "salmon", "catfish", "goldfish"]) + >>> df + a b c + tuna 1 2 3 + salmon 1 5 6 + catfish 2 5 8 + goldfish 2 6 9 + >>> df.groupby("a").shift(1) + b c + tuna NaN NaN + salmon 2.0 3.0 + catfish NaN NaN + goldfish 5.0 8.0 + """ + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "shift") + else: + axis = 0 + + if is_list_like(periods): + if axis == 1: + raise ValueError( + "If `periods` contains multiple shifts, `axis` cannot be 1." + ) + periods = cast(Sequence, periods) + if len(periods) == 0: + raise ValueError("If `periods` is an iterable, it cannot be empty.") + from pandas.core.reshape.concat import concat + + add_suffix = True + else: + if not is_integer(periods): + raise TypeError( + f"Periods must be integer, but {periods} is {type(periods)}." + ) + if suffix: + raise ValueError("Cannot specify `suffix` if `periods` is an int.") + periods = [cast(int, periods)] + add_suffix = False + + shifted_dataframes = [] + for period in periods: + if not is_integer(period): + raise TypeError( + f"Periods must be integer, but {period} is {type(period)}." + ) + period = cast(int, period) + if freq is not None or axis != 0: + f = lambda x: x.shift( + period, freq, axis, fill_value # pylint: disable=cell-var-from-loop + ) + shifted = self._python_apply_general( + f, self._selected_obj, is_transform=True + ) + else: + if fill_value is lib.no_default: + fill_value = None + ids, _, ngroups = self._grouper.group_info + res_indexer = np.zeros(len(ids), dtype=np.int64) + + libgroupby.group_shift_indexer(res_indexer, ids, ngroups, period) + + obj = self._obj_with_exclusions + + shifted = obj._reindex_with_indexers( + {self.axis: (obj.axes[self.axis], res_indexer)}, + fill_value=fill_value, + allow_dups=True, + ) + + if add_suffix: + if isinstance(shifted, Series): + shifted = cast(NDFrameT, shifted.to_frame()) + shifted = shifted.add_suffix( + f"{suffix}_{period}" if suffix else f"_{period}" + ) + shifted_dataframes.append(cast(Union[Series, DataFrame], shifted)) + + return ( + shifted_dataframes[0] + if len(shifted_dataframes) == 1 + else concat(shifted_dataframes, axis=1) + ) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def diff( + self, periods: int = 1, axis: AxisInt | lib.NoDefault = lib.no_default + ) -> NDFrameT: + """ + First discrete difference of element. + + Calculates the difference of each element compared with another + element in the group (default is element in previous row). + + Parameters + ---------- + periods : int, default 1 + Periods to shift for calculating difference, accepts negative values. + axis : axis to shift, default 0 + Take difference over rows (0) or columns (1). + + .. deprecated:: 2.1.0 + For axis=1, operate on the underlying object instead. Otherwise + the axis keyword is not necessary. + + Returns + ------- + Series or DataFrame + First differences. 
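A note ahead of the implementation at the end of this method: ``diff`` is computed as ``obj - self.shift(periods)``, and int8/int16 inputs are first coerced to float32 (rather than float64) to match ``Series.diff`` (GH45562). A sketch of the dtype effect:

>>> ser = pd.Series([1, 2, 4], dtype="int8", index=["x", "x", "x"])
>>> ser.groupby(level=0).diff().dtype
dtype('float32')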
+ %(see_also)s + Examples + -------- + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([7, 2, 8, 4, 3, 3], index=lst) + >>> ser + a 7 + a 2 + a 8 + b 4 + b 3 + b 3 + dtype: int64 + >>> ser.groupby(level=0).diff() + a NaN + a -5.0 + a 6.0 + b NaN + b -1.0 + b 0.0 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = {'a': [1, 3, 5, 7, 7, 8, 3], 'b': [1, 4, 8, 4, 4, 2, 1]} + >>> df = pd.DataFrame(data, index=['dog', 'dog', 'dog', + ... 'mouse', 'mouse', 'mouse', 'mouse']) + >>> df + a b + dog 1 1 + dog 3 4 + dog 5 8 + mouse 7 4 + mouse 7 4 + mouse 8 2 + mouse 3 1 + >>> df.groupby(level=0).diff() + a b + dog NaN NaN + dog 2.0 3.0 + dog 2.0 4.0 + mouse NaN NaN + mouse 0.0 0.0 + mouse 1.0 -2.0 + mouse -5.0 -1.0 + """ + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "diff") + else: + axis = 0 + + if axis != 0: + return self.apply(lambda x: x.diff(periods=periods, axis=axis)) + + obj = self._obj_with_exclusions + shifted = self.shift(periods=periods) + + # GH45562 - to retain existing behavior and match behavior of Series.diff(), + # int8 and int16 are coerced to float32 rather than float64. + dtypes_to_f32 = ["int8", "int16"] + if obj.ndim == 1: + if obj.dtype in dtypes_to_f32: + shifted = shifted.astype("float32") + else: + to_coerce = [c for c, dtype in obj.dtypes.items() if dtype in dtypes_to_f32] + if len(to_coerce): + shifted = shifted.astype({c: "float32" for c in to_coerce}) + + return obj - shifted + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def pct_change( + self, + periods: int = 1, + fill_method: FillnaOptions | None | lib.NoDefault = lib.no_default, + limit: int | None | lib.NoDefault = lib.no_default, + freq=None, + axis: Axis | lib.NoDefault = lib.no_default, + ): + """ + Calculate pct_change of each value to previous entry in group. + + Returns + ------- + Series or DataFrame + Percentage changes within each group. + %(see_also)s + Examples + -------- + + For SeriesGroupBy: + + >>> lst = ['a', 'a', 'b', 'b'] + >>> ser = pd.Series([1, 2, 3, 4], index=lst) + >>> ser + a 1 + a 2 + b 3 + b 4 + dtype: int64 + >>> ser.groupby(level=0).pct_change() + a NaN + a 1.000000 + b NaN + b 0.333333 + dtype: float64 + + For DataFrameGroupBy: + + >>> data = [[1, 2, 3], [1, 5, 6], [2, 5, 8], [2, 6, 9]] + >>> df = pd.DataFrame(data, columns=["a", "b", "c"], + ... index=["tuna", "salmon", "catfish", "goldfish"]) + >>> df + a b c + tuna 1 2 3 + salmon 1 5 6 + catfish 2 5 8 + goldfish 2 6 9 + >>> df.groupby("a").pct_change() + b c + tuna NaN NaN + salmon 1.5 1.000 + catfish NaN NaN + goldfish 0.2 0.125 + """ + # GH#53491 + if fill_method not in (lib.no_default, None) or limit is not lib.no_default: + warnings.warn( + "The 'fill_method' keyword being not None and the 'limit' keyword in " + f"{type(self).__name__}.pct_change are deprecated and will be removed " + "in a future version. Either fill in any non-leading NA values prior " + "to calling pct_change or specify 'fill_method=None' to not fill NA " + "values.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if fill_method is lib.no_default: + if limit is lib.no_default and any( + grp.isna().values.any() for _, grp in self + ): + warnings.warn( + "The default fill_method='ffill' in " + f"{type(self).__name__}.pct_change is deprecated and will " + "be removed in a future version. 
Either fill in any " + "non-leading NA values prior to calling pct_change or " + "specify 'fill_method=None' to not fill NA values.", + FutureWarning, + stacklevel=find_stack_level(), + ) + fill_method = "ffill" + if limit is lib.no_default: + limit = None + + if axis is not lib.no_default: + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, "pct_change") + else: + axis = 0 + + # TODO(GH#23918): Remove this conditional for SeriesGroupBy when + # GH#23918 is fixed + if freq is not None or axis != 0: + f = lambda x: x.pct_change( + periods=periods, + fill_method=fill_method, + limit=limit, + freq=freq, + axis=axis, + ) + return self._python_apply_general(f, self._selected_obj, is_transform=True) + + if fill_method is None: # GH30463 + fill_method = "ffill" + limit = 0 + filled = getattr(self, fill_method)(limit=limit) + if self.axis == 0: + fill_grp = filled.groupby(self._grouper.codes, group_keys=self.group_keys) + else: + fill_grp = filled.T.groupby(self._grouper.codes, group_keys=self.group_keys) + shifted = fill_grp.shift(periods=periods, freq=freq) + if self.axis == 1: + shifted = shifted.T + return (filled / shifted) - 1 + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def head(self, n: int = 5) -> NDFrameT: + """ + Return first n rows of each group. + + Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows + from the original DataFrame with original index and order preserved + (``as_index`` flag is ignored). + + Parameters + ---------- + n : int + If positive: number of entries to include from start of each group. + If negative: number of entries to exclude from end of each group. + + Returns + ------- + Series or DataFrame + Subset of original Series or DataFrame as determined by n. + %(see_also)s + Examples + -------- + + >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]], + ... columns=['A', 'B']) + >>> df.groupby('A').head(1) + A B + 0 1 2 + 2 5 6 + >>> df.groupby('A').head(-1) + A B + 0 1 2 + """ + mask = self._make_mask_from_positional_indexer(slice(None, n)) + return self._mask_selected_obj(mask) + + @final + @Substitution(name="groupby") + @Substitution(see_also=_common_see_also) + def tail(self, n: int = 5) -> NDFrameT: + """ + Return last n rows of each group. + + Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows + from the original DataFrame with original index and order preserved + (``as_index`` flag is ignored). + + Parameters + ---------- + n : int + If positive: number of entries to include from end of each group. + If negative: number of entries to exclude from start of each group. + + Returns + ------- + Series or DataFrame + Subset of original Series or DataFrame as determined by n. + %(see_also)s + Examples + -------- + + >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]], + ... columns=['A', 'B']) + >>> df.groupby('A').tail(1) + A B + 1 a 2 + 3 b 2 + >>> df.groupby('A').tail(-1) + A B + 1 a 2 + 3 b 2 + """ + if n: + mask = self._make_mask_from_positional_indexer(slice(-n, None)) + else: + mask = self._make_mask_from_positional_indexer([]) + + return self._mask_selected_obj(mask) + + @final + def _mask_selected_obj(self, mask: npt.NDArray[np.bool_]) -> NDFrameT: + """ + Return _selected_obj with mask applied to the correct axis. + + Parameters + ---------- + mask : np.ndarray[bool] + Boolean mask to apply. + + Returns + ------- + Series or DataFrame + Filtered _selected_obj. 
+ """ + ids = self._grouper.group_info[0] + mask = mask & (ids != -1) + + if self.axis == 0: + return self._selected_obj[mask] + else: + return self._selected_obj.iloc[:, mask] + + @final + def _reindex_output( + self, + output: OutputFrameOrSeries, + fill_value: Scalar = np.nan, + qs: npt.NDArray[np.float64] | None = None, + ) -> OutputFrameOrSeries: + """ + If we have categorical groupers, then we might want to make sure that + we have a fully re-indexed output to the levels. This means expanding + the output space to accommodate all values in the cartesian product of + our groups, regardless of whether they were observed in the data or + not. This will expand the output space if there are missing groups. + + The method returns early without modifying the input if the number of + groupings is less than 2, self.observed == True or none of the groupers + are categorical. + + Parameters + ---------- + output : Series or DataFrame + Object resulting from grouping and applying an operation. + fill_value : scalar, default np.nan + Value to use for unobserved categories if self.observed is False. + qs : np.ndarray[float64] or None, default None + quantile values, only relevant for quantile. + + Returns + ------- + Series or DataFrame + Object (potentially) re-indexed to include all possible groups. + """ + groupings = self._grouper.groupings + if len(groupings) == 1: + return output + + # if we only care about the observed values + # we are done + elif self.observed: + return output + + # reindexing only applies to a Categorical grouper + elif not any( + isinstance(ping.grouping_vector, (Categorical, CategoricalIndex)) + for ping in groupings + ): + return output + + levels_list = [ping._group_index for ping in groupings] + names = self._grouper.names + if qs is not None: + # error: Argument 1 to "append" of "list" has incompatible type + # "ndarray[Any, dtype[floating[_64Bit]]]"; expected "Index" + levels_list.append(qs) # type: ignore[arg-type] + names = names + [None] + index = MultiIndex.from_product(levels_list, names=names) + if self.sort: + index = index.sort_values() + + if self.as_index: + # Always holds for SeriesGroupBy unless GH#36507 is implemented + d = { + self.obj._get_axis_name(self.axis): index, + "copy": False, + "fill_value": fill_value, + } + return output.reindex(**d) # type: ignore[arg-type] + + # GH 13204 + # Here, the categorical in-axis groupers, which need to be fully + # expanded, are columns in `output`. An idea is to do: + # output = output.set_index(self._grouper.names) + # .reindex(index).reset_index() + # but special care has to be taken because of possible not-in-axis + # groupers. + # So, we manually select and drop the in-axis grouper columns, + # reindex `output`, and then reset the in-axis grouper columns. 
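What this reindexing buys in practice, sketched with two categorical keys (comments describe the result rather than quoting exact repr):

>>> k1 = pd.Categorical(["a", "a"], categories=["a", "b"])
>>> k2 = pd.Categorical(["x", "y"], categories=["x", "y"])
>>> df = pd.DataFrame({"k1": k1, "k2": k2, "val": [1.0, 2.0]})
>>> df.groupby(["k1", "k2"], observed=False)["val"].mean()
# the index is the full cartesian product of the two category sets; the
# never-observed ("b", "x") and ("b", "y") combinations are filled with
# NaN, the default ``fill_value``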
+ + # Select in-axis groupers + in_axis_grps = [ + (i, ping.name) for (i, ping) in enumerate(groupings) if ping.in_axis + ] + if len(in_axis_grps) > 0: + g_nums, g_names = zip(*in_axis_grps) + output = output.drop(labels=list(g_names), axis=1) + + # Set a temp index and reindex (possibly expanding) + output = output.set_index(self._grouper.result_index).reindex( + index, copy=False, fill_value=fill_value + ) + + # Reset in-axis grouper columns + # (using level numbers `g_nums` because level names may not be unique) + if len(in_axis_grps) > 0: + output = output.reset_index(level=g_nums) + + return output.reset_index(drop=True) + + @final + def sample( + self, + n: int | None = None, + frac: float | None = None, + replace: bool = False, + weights: Sequence | Series | None = None, + random_state: RandomState | None = None, + ): + """ + Return a random sample of items from each group. + + You can use `random_state` for reproducibility. + + Parameters + ---------- + n : int, optional + Number of items to return for each group. Cannot be used with + `frac` and must be no larger than the smallest group unless + `replace` is True. Default is one if `frac` is None. + frac : float, optional + Fraction of items to return. Cannot be used with `n`. + replace : bool, default False + Allow or disallow sampling of the same row more than once. + weights : list-like, optional + Default None results in equal probability weighting. + If passed a list-like then values must have the same length as + the underlying DataFrame or Series object and will be used as + sampling probabilities after normalization within each group. + Values must be non-negative with at least one positive element + within each group. + random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional + If int, array-like, or BitGenerator, seed for random number generator. + If np.random.RandomState or np.random.Generator, use as given. + + .. versionchanged:: 1.4.0 + + np.random.Generator objects now accepted + + Returns + ------- + Series or DataFrame + A new object of same type as caller containing items randomly + sampled within each group from the caller object. + + See Also + -------- + DataFrame.sample: Generate random samples from a DataFrame object. + numpy.random.choice: Generate a random sample from a given 1-D numpy + array. + + Examples + -------- + >>> df = pd.DataFrame( + ... {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)} + ... ) + >>> df + a b + 0 red 0 + 1 red 1 + 2 blue 2 + 3 blue 3 + 4 black 4 + 5 black 5 + + Select one row at random for each distinct value in column a. The + `random_state` argument can be used to guarantee reproducibility: + + >>> df.groupby("a").sample(n=1, random_state=1) + a b + 4 black 4 + 2 blue 2 + 1 red 1 + + Set `frac` to sample fixed proportions rather than counts: + + >>> df.groupby("a")["b"].sample(frac=0.5, random_state=2) + 5 5 + 2 2 + 0 0 + Name: b, dtype: int64 + + Control sample probabilities within groups by setting weights: + + >>> df.groupby("a").sample( + ... n=1, + ... weights=[1, 1, 1, 0, 0, 1], + ... random_state=1, + ... 
) + a b + 5 black 5 + 2 blue 2 + 0 red 0 + """ # noqa: E501 + if self._selected_obj.empty: + # GH48459 prevent ValueError when object is empty + return self._selected_obj + size = sample.process_sampling_size(n, frac, replace) + if weights is not None: + weights_arr = sample.preprocess_weights( + self._selected_obj, weights, axis=self.axis + ) + + random_state = com.random_state(random_state) + + group_iterator = self._grouper.get_iterator(self._selected_obj, self.axis) + + sampled_indices = [] + for labels, obj in group_iterator: + grp_indices = self.indices[labels] + group_size = len(grp_indices) + if size is not None: + sample_size = size + else: + assert frac is not None + sample_size = round(frac * group_size) + + grp_sample = sample.sample( + group_size, + size=sample_size, + replace=replace, + weights=None if weights is None else weights_arr[grp_indices], + random_state=random_state, + ) + sampled_indices.append(grp_indices[grp_sample]) + + sampled_indices = np.concatenate(sampled_indices) + return self._selected_obj.take(sampled_indices, axis=self.axis) + + def _idxmax_idxmin( + self, + how: Literal["idxmax", "idxmin"], + ignore_unobserved: bool = False, + axis: Axis | None | lib.NoDefault = lib.no_default, + skipna: bool = True, + numeric_only: bool = False, + ) -> NDFrameT: + """Compute idxmax/idxmin. + + Parameters + ---------- + how : {'idxmin', 'idxmax'} + Whether to compute idxmin or idxmax. + axis : {{0 or 'index', 1 or 'columns'}}, default None + The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise. + If axis is not provided, grouper's axis is used. + numeric_only : bool, default False + Include only float, int, boolean columns. + skipna : bool, default True + Exclude NA/null values. If an entire row/column is NA, the result + will be NA. + ignore_unobserved : bool, default False + When True and an unobserved group is encountered, do not raise. This used + for transform where unobserved groups do not play an impact on the result. + + Returns + ------- + Series or DataFrame + idxmax or idxmin for the groupby operation. + """ + if axis is not lib.no_default: + if axis is None: + axis = self.axis + axis = self.obj._get_axis_number(axis) + self._deprecate_axis(axis, how) + else: + axis = self.axis + + if not self.observed and any( + ping._passed_categorical for ping in self._grouper.groupings + ): + expected_len = np.prod( + [len(ping._group_index) for ping in self._grouper.groupings] + ) + if len(self._grouper.groupings) == 1: + result_len = len(self._grouper.groupings[0].grouping_vector.unique()) + else: + # result_index only contains observed groups in this case + result_len = len(self._grouper.result_index) + assert result_len <= expected_len + has_unobserved = result_len < expected_len + + raise_err: bool | np.bool_ = not ignore_unobserved and has_unobserved + # Only raise an error if there are columns to compute; otherwise we return + # an empty DataFrame with an index (possibly including unobserved) but no + # columns + data = self._obj_with_exclusions + if raise_err and isinstance(data, DataFrame): + if numeric_only: + data = data._get_numeric_data() + raise_err = len(data.columns) > 0 + + if raise_err: + raise ValueError( + f"Can't get {how} of an empty group due to unobserved categories. " + "Specify observed=True in groupby instead." + ) + elif not skipna: + if self._obj_with_exclusions.isna().any(axis=None): + warnings.warn( + f"The behavior of {type(self).__name__}.{how} with all-NA " + "values, or any-NA and skipna=False, is deprecated. 
In a future " + "version this will raise ValueError", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if axis == 1: + try: + + def func(df): + method = getattr(df, how) + return method(axis=axis, skipna=skipna, numeric_only=numeric_only) + + func.__name__ = how + result = self._python_apply_general( + func, self._obj_with_exclusions, not_indexed_same=True + ) + except ValueError as err: + name = "argmax" if how == "idxmax" else "argmin" + if f"attempt to get {name} of an empty sequence" in str(err): + raise ValueError( + f"Can't get {how} of an empty group due to unobserved " + "categories. Specify observed=True in groupby instead." + ) from None + raise + return result + + result = self._agg_general( + numeric_only=numeric_only, + min_count=1, + alias=how, + skipna=skipna, + ) + return result + + def _wrap_idxmax_idxmin(self, res: NDFrameT) -> NDFrameT: + index = self.obj._get_axis(self.axis) + if res.size == 0: + result = res.astype(index.dtype) + else: + if isinstance(index, MultiIndex): + index = index.to_flat_index() + values = res._values + assert isinstance(values, np.ndarray) + na_value = na_value_for_dtype(index.dtype, compat=False) + if isinstance(res, Series): + # mypy: expression has type "Series", variable has type "NDFrameT" + result = res._constructor( # type: ignore[assignment] + index.array.take(values, allow_fill=True, fill_value=na_value), + index=res.index, + name=res.name, + ) + else: + data = {} + for k, column_values in enumerate(values.T): + data[k] = index.array.take( + column_values, allow_fill=True, fill_value=na_value + ) + result = self.obj._constructor(data, index=res.index) + result.columns = res.columns + return result + + +@doc(GroupBy) +def get_groupby( + obj: NDFrame, + by: _KeysArgType | None = None, + axis: AxisInt = 0, + grouper: ops.BaseGrouper | None = None, + group_keys: bool = True, +) -> GroupBy: + klass: type[GroupBy] + if isinstance(obj, Series): + from pandas.core.groupby.generic import SeriesGroupBy + + klass = SeriesGroupBy + elif isinstance(obj, DataFrame): + from pandas.core.groupby.generic import DataFrameGroupBy + + klass = DataFrameGroupBy + else: # pragma: no cover + raise TypeError(f"invalid type: {obj}") + + return klass( + obj=obj, + keys=by, + axis=axis, + grouper=grouper, + group_keys=group_keys, + ) + + +def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiIndex: + """ + Insert the sequence 'qs' of quantiles as the inner-most level of a MultiIndex. + + The quantile level in the MultiIndex is a repeated copy of 'qs'. + + Parameters + ---------- + idx : Index + qs : np.ndarray[float64] + + Returns + ------- + MultiIndex + """ + nqs = len(qs) + lev_codes, lev = Index(qs).factorize() + lev_codes = coerce_indexer_dtype(lev_codes, lev) + + if idx._is_multi: + idx = cast(MultiIndex, idx) + levels = list(idx.levels) + [lev] + codes = [np.repeat(x, nqs) for x in idx.codes] + [np.tile(lev_codes, len(idx))] + mi = MultiIndex(levels=levels, codes=codes, names=idx.names + [None]) + else: + nidx = len(idx) + idx_codes = coerce_indexer_dtype(np.arange(nidx), idx) + levels = [idx, lev] + codes = [np.repeat(idx_codes, nqs), np.tile(lev_codes, nidx)] + mi = MultiIndex(levels=levels, codes=codes, names=[idx.name, None]) + + return mi + + +# GH#7155 +_apply_groupings_depr = ( + "{}.{} operated on the grouping columns. This behavior is deprecated, " + "and in a future version of pandas the grouping columns will be excluded " + "from the operation. 
Either pass `include_groups=False` to exclude the " + "groupings or explicitly select the grouping columns after groupby to silence " + "this warning." +) diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/grouper.py b/venv/lib/python3.10/site-packages/pandas/core/groupby/grouper.py new file mode 100644 index 0000000000000000000000000000000000000000..e2224caad9e846f5e386019e6da5b47d16ab3694 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/groupby/grouper.py @@ -0,0 +1,1102 @@ +""" +Provide user facing operators for doing the split part of the +split-apply-combine paradigm. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + final, +) +import warnings + +import numpy as np + +from pandas._config import ( + using_copy_on_write, + warn_copy_on_write, +) + +from pandas._libs import lib +from pandas._libs.tslibs import OutOfBoundsDatetime +from pandas.errors import InvalidIndexError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_list_like, + is_scalar, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas.core import algorithms +from pandas.core.arrays import ( + Categorical, + ExtensionArray, +) +import pandas.core.common as com +from pandas.core.frame import DataFrame +from pandas.core.groupby import ops +from pandas.core.groupby.categorical import recode_for_groupby +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, + MultiIndex, +) +from pandas.core.series import Series + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + ) + + from pandas._typing import ( + ArrayLike, + Axis, + NDFrameT, + npt, + ) + + from pandas.core.generic import NDFrame + + +class Grouper: + """ + A Grouper allows the user to specify a groupby instruction for an object. + + This specification will select a column via the key parameter, or if the + level and/or axis parameters are given, a level of the index of the target + object. + + If `axis` and/or `level` are passed as keywords to both `Grouper` and + `groupby`, the values passed to `Grouper` take precedence. + + Parameters + ---------- + key : str, defaults to None + Groupby key, which selects the grouping column of the target. + level : name/number, defaults to None + The level for the target index. + freq : str / frequency object, defaults to None + This will groupby the specified frequency if the target selection + (via key or level) is a datetime-like object. For full specification + of available frequencies, please see `here + `_. + axis : str, int, defaults to 0 + Number/name of the axis. + sort : bool, default to False + Whether to sort the resulting labels. + closed : {'left' or 'right'} + Closed end of interval. Only when `freq` parameter is passed. + label : {'left' or 'right'} + Interval boundary to use for labeling. + Only when `freq` parameter is passed. + convention : {'start', 'end', 'e', 's'} + If grouper is PeriodIndex and `freq` parameter is passed. + + origin : Timestamp or str, default 'start_day' + The timestamp on which to adjust the grouping. The timezone of origin must + match the timezone of the index. 
+ If string, must be one of the following: + + - 'epoch': `origin` is 1970-01-01 + - 'start': `origin` is the first value of the timeseries + - 'start_day': `origin` is the first day at midnight of the timeseries + + - 'end': `origin` is the last value of the timeseries + - 'end_day': `origin` is the ceiling midnight of the last day + + .. versionadded:: 1.3.0 + + offset : Timedelta or str, default is None + An offset timedelta added to the origin. + + dropna : bool, default True + If True, and if group keys contain NA values, NA values together with + row/column will be dropped. If False, NA values will also be treated as + the key in groups. + + Returns + ------- + Grouper or pandas.api.typing.TimeGrouper + A TimeGrouper is returned if ``freq`` is not ``None``. Otherwise, a Grouper + is returned. + + Examples + -------- + ``df.groupby(pd.Grouper(key="Animal"))`` is equivalent to ``df.groupby('Animal')`` + + >>> df = pd.DataFrame( + ... { + ... "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"], + ... "Speed": [100, 5, 200, 300, 15], + ... } + ... ) + >>> df + Animal Speed + 0 Falcon 100 + 1 Parrot 5 + 2 Falcon 200 + 3 Falcon 300 + 4 Parrot 15 + >>> df.groupby(pd.Grouper(key="Animal")).mean() + Speed + Animal + Falcon 200.0 + Parrot 10.0 + + Specify a resample operation on the column 'Publish date' + + >>> df = pd.DataFrame( + ... { + ... "Publish date": [ + ... pd.Timestamp("2000-01-02"), + ... pd.Timestamp("2000-01-02"), + ... pd.Timestamp("2000-01-09"), + ... pd.Timestamp("2000-01-16") + ... ], + ... "ID": [0, 1, 2, 3], + ... "Price": [10, 20, 30, 40] + ... } + ... ) + >>> df + Publish date ID Price + 0 2000-01-02 0 10 + 1 2000-01-02 1 20 + 2 2000-01-09 2 30 + 3 2000-01-16 3 40 + >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean() + ID Price + Publish date + 2000-01-02 0.5 15.0 + 2000-01-09 2.0 30.0 + 2000-01-16 3.0 40.0 + + If you want to adjust the start of the bins based on a fixed timestamp: + + >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' + >>> rng = pd.date_range(start, end, freq='7min') + >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng) + >>> ts + 2000-10-01 23:30:00 0 + 2000-10-01 23:37:00 3 + 2000-10-01 23:44:00 6 + 2000-10-01 23:51:00 9 + 2000-10-01 23:58:00 12 + 2000-10-02 00:05:00 15 + 2000-10-02 00:12:00 18 + 2000-10-02 00:19:00 21 + 2000-10-02 00:26:00 24 + Freq: 7min, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min')).sum() + 2000-10-01 23:14:00 0 + 2000-10-01 23:31:00 9 + 2000-10-01 23:48:00 21 + 2000-10-02 00:05:00 54 + 2000-10-02 00:22:00 24 + Freq: 17min, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum() + 2000-10-01 23:18:00 0 + 2000-10-01 23:35:00 18 + 2000-10-01 23:52:00 27 + 2000-10-02 00:09:00 39 + 2000-10-02 00:26:00 24 + Freq: 17min, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum() + 2000-10-01 23:24:00 3 + 2000-10-01 23:41:00 15 + 2000-10-01 23:58:00 45 + 2000-10-02 00:15:00 45 + Freq: 17min, dtype: int64 + + If you want to adjust the start of the bins with an `offset` Timedelta, the two + following lines are equivalent: + + >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum() + 2000-10-01 23:30:00 9 + 2000-10-01 23:47:00 21 + 2000-10-02 00:04:00 54 + 2000-10-02 00:21:00 24 + Freq: 17min, dtype: int64 + + >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum() + 2000-10-01 23:30:00 9 + 2000-10-01 23:47:00 21 + 2000-10-02 00:04:00 54 + 2000-10-02 00:21:00 24 + Freq: 17min, dtype: int64 + + To replace the use of the 
deprecated `base` argument, you can now use `offset`, + in this example it is equivalent to have `base=2`: + + >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum() + 2000-10-01 23:16:00 0 + 2000-10-01 23:33:00 9 + 2000-10-01 23:50:00 36 + 2000-10-02 00:07:00 39 + 2000-10-02 00:24:00 24 + Freq: 17min, dtype: int64 + """ + + sort: bool + dropna: bool + _gpr_index: Index | None + _grouper: Index | None + + _attributes: tuple[str, ...] = ("key", "level", "freq", "axis", "sort", "dropna") + + def __new__(cls, *args, **kwargs): + if kwargs.get("freq") is not None: + from pandas.core.resample import TimeGrouper + + cls = TimeGrouper + return super().__new__(cls) + + def __init__( + self, + key=None, + level=None, + freq=None, + axis: Axis | lib.NoDefault = lib.no_default, + sort: bool = False, + dropna: bool = True, + ) -> None: + if type(self) is Grouper: + # i.e. not TimeGrouper + if axis is not lib.no_default: + warnings.warn( + "Grouper axis keyword is deprecated and will be removed in a " + "future version. To group on axis=1, use obj.T.groupby(...) " + "instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + axis = 0 + if axis is lib.no_default: + axis = 0 + + self.key = key + self.level = level + self.freq = freq + self.axis = axis + self.sort = sort + self.dropna = dropna + + self._grouper_deprecated = None + self._indexer_deprecated: npt.NDArray[np.intp] | None = None + self._obj_deprecated = None + self._gpr_index = None + self.binner = None + self._grouper = None + self._indexer: npt.NDArray[np.intp] | None = None + + def _get_grouper( + self, obj: NDFrameT, validate: bool = True + ) -> tuple[ops.BaseGrouper, NDFrameT]: + """ + Parameters + ---------- + obj : Series or DataFrame + validate : bool, default True + if True, validate the grouper + + Returns + ------- + a tuple of grouper, obj (possibly sorted) + """ + obj, _, _ = self._set_grouper(obj) + grouper, _, obj = get_grouper( + obj, + [self.key], + axis=self.axis, + level=self.level, + sort=self.sort, + validate=validate, + dropna=self.dropna, + ) + # Without setting this, subsequent lookups to .groups raise + # error: Incompatible types in assignment (expression has type "BaseGrouper", + # variable has type "None") + self._grouper_deprecated = grouper # type: ignore[assignment] + + return grouper, obj + + def _set_grouper( + self, obj: NDFrameT, sort: bool = False, *, gpr_index: Index | None = None + ) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]: + """ + given an object and the specifications, setup the internal grouper + for this particular specification + + Parameters + ---------- + obj : Series or DataFrame + sort : bool, default False + whether the resulting grouper should be sorted + gpr_index : Index or None, default None + + Returns + ------- + NDFrame + Index + np.ndarray[np.intp] | None + """ + assert obj is not None + + if self.key is not None and self.level is not None: + raise ValueError("The Grouper cannot specify both a key and a level!") + + # Keep self._grouper value before overriding + if self._grouper is None: + # TODO: What are we assuming about subsequent calls? + self._grouper = gpr_index + self._indexer = self._indexer_deprecated + + # the key must be a valid info item + if self.key is not None: + key = self.key + # The 'on' is already defined + if getattr(gpr_index, "name", None) == key and isinstance(obj, Series): + # Sometimes self._grouper will have been resorted while + # obj has not. 
In this case there is a mismatch when we + # call self._grouper.take(obj.index) so we need to undo the sorting + # before we call _grouper.take. + assert self._grouper is not None + if self._indexer is not None: + reverse_indexer = self._indexer.argsort() + unsorted_ax = self._grouper.take(reverse_indexer) + ax = unsorted_ax.take(obj.index) + else: + ax = self._grouper.take(obj.index) + else: + if key not in obj._info_axis: + raise KeyError(f"The grouper name {key} is not found") + ax = Index(obj[key], name=key) + + else: + ax = obj._get_axis(self.axis) + if self.level is not None: + level = self.level + + # if a level is given it must be a mi level or + # equivalent to the axis name + if isinstance(ax, MultiIndex): + level = ax._get_level_number(level) + ax = Index(ax._get_level_values(level), name=ax.names[level]) + + else: + if level not in (0, ax.name): + raise ValueError(f"The level {level} is not valid") + + # possibly sort + indexer: npt.NDArray[np.intp] | None = None + if (self.sort or sort) and not ax.is_monotonic_increasing: + # use stable sort to support first, last, nth + # TODO: why does putting na_position="first" fix datetimelike cases? + indexer = self._indexer_deprecated = ax.array.argsort( + kind="mergesort", na_position="first" + ) + ax = ax.take(indexer) + obj = obj.take(indexer, axis=self.axis) + + # error: Incompatible types in assignment (expression has type + # "NDFrameT", variable has type "None") + self._obj_deprecated = obj # type: ignore[assignment] + self._gpr_index = ax + return obj, ax, indexer + + @final + @property + def ax(self) -> Index: + warnings.warn( + f"{type(self).__name__}.ax is deprecated and will be removed in a " + "future version. Use Resampler.ax instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + index = self._gpr_index + if index is None: + raise ValueError("_set_grouper must be called before ax is accessed") + return index + + @final + @property + def indexer(self): + warnings.warn( + f"{type(self).__name__}.indexer is deprecated and will be removed " + "in a future version. Use Resampler.indexer instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._indexer_deprecated + + @final + @property + def obj(self): + # TODO(3.0): enforcing these deprecations on Grouper should close + # GH#25564, GH#41930 + warnings.warn( + f"{type(self).__name__}.obj is deprecated and will be removed " + "in a future version. Use GroupBy.indexer instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._obj_deprecated + + @final + @property + def grouper(self): + warnings.warn( + f"{type(self).__name__}.grouper is deprecated and will be removed " + "in a future version. Use GroupBy.grouper instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return self._grouper_deprecated + + @final + @property + def groups(self): + warnings.warn( + f"{type(self).__name__}.groups is deprecated and will be removed " + "in a future version. 
Use GroupBy.groups instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + # error: "None" has no attribute "groups" + return self._grouper_deprecated.groups # type: ignore[attr-defined] + + @final + def __repr__(self) -> str: + attrs_list = ( + f"{attr_name}={repr(getattr(self, attr_name))}" + for attr_name in self._attributes + if getattr(self, attr_name) is not None + ) + attrs = ", ".join(attrs_list) + cls_name = type(self).__name__ + return f"{cls_name}({attrs})" + + +@final +class Grouping: + """ + Holds the grouping information for a single key + + Parameters + ---------- + index : Index + grouper : + obj : DataFrame or Series + name : Label + level : + observed : bool, default False + If we are a Categorical, use the observed values + in_axis : if the Grouping is a column in self.obj and hence among + Groupby.exclusions list + dropna : bool, default True + Whether to drop NA groups. + uniques : Array-like, optional + When specified, will be used for unique values. Enables including empty groups + in the result for a BinGrouper. Must not contain duplicates. + + Attributes + ------- + indices : dict + Mapping of {group -> index_list} + codes : ndarray + Group codes + group_index : Index or None + unique groups + groups : dict + Mapping of {group -> label_list} + """ + + _codes: npt.NDArray[np.signedinteger] | None = None + _all_grouper: Categorical | None + _orig_cats: Index | None + _index: Index + + def __init__( + self, + index: Index, + grouper=None, + obj: NDFrame | None = None, + level=None, + sort: bool = True, + observed: bool = False, + in_axis: bool = False, + dropna: bool = True, + uniques: ArrayLike | None = None, + ) -> None: + self.level = level + self._orig_grouper = grouper + grouping_vector = _convert_grouper(index, grouper) + self._all_grouper = None + self._orig_cats = None + self._index = index + self._sort = sort + self.obj = obj + self._observed = observed + self.in_axis = in_axis + self._dropna = dropna + self._uniques = uniques + + # we have a single grouper which may be a myriad of things, + # some of which are dependent on the passing in level + + ilevel = self._ilevel + if ilevel is not None: + # In extant tests, the new self.grouping_vector matches + # `index.get_level_values(ilevel)` whenever + # mapper is None and isinstance(index, MultiIndex) + if isinstance(index, MultiIndex): + index_level = index.get_level_values(ilevel) + else: + index_level = index + + if grouping_vector is None: + grouping_vector = index_level + else: + mapper = grouping_vector + grouping_vector = index_level.map(mapper) + + # a passed Grouper like, directly get the grouper in the same way + # as single grouper groupby, use the group_info to get codes + elif isinstance(grouping_vector, Grouper): + # get the new grouper; we already have disambiguated + # what key/level refer to exactly, don't need to + # check again as we have by this point converted these + # to an actual value (rather than a pd.Grouper) + assert self.obj is not None # for mypy + newgrouper, newobj = grouping_vector._get_grouper(self.obj, validate=False) + self.obj = newobj + + if isinstance(newgrouper, ops.BinGrouper): + # TODO: can we unwrap this and get a tighter typing + # for self.grouping_vector? + grouping_vector = newgrouper + else: + # ops.BaseGrouper + # TODO: 2023-02-03 no test cases with len(newgrouper.groupings) > 1. + # If that were to occur, would we be throwing out information? 
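For the ``level`` branch near the top of this constructor (grouping on an index level rather than on a passed array or column), a minimal illustration:

>>> idx = pd.MultiIndex.from_arrays([["x", "x", "y"], [1, 2, 3]],
...                                 names=["lvl", "num"])
>>> ser = pd.Series([10, 20, 30], index=idx)
>>> ser.groupby(level="lvl").sum()
lvl
x    30
y    30
dtype: int64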
+ # error: Cannot determine type of "grouping_vector" [has-type] + ng = newgrouper.groupings[0].grouping_vector # type: ignore[has-type] + # use Index instead of ndarray so we can recover the name + grouping_vector = Index(ng, name=newgrouper.result_index.name) + + elif not isinstance( + grouping_vector, (Series, Index, ExtensionArray, np.ndarray) + ): + # no level passed + if getattr(grouping_vector, "ndim", 1) != 1: + t = str(type(grouping_vector)) + raise ValueError(f"Grouper for '{t}' not 1-dimensional") + + grouping_vector = index.map(grouping_vector) + + if not ( + hasattr(grouping_vector, "__len__") + and len(grouping_vector) == len(index) + ): + grper = pprint_thing(grouping_vector) + errmsg = ( + "Grouper result violates len(labels) == " + f"len(data)\nresult: {grper}" + ) + raise AssertionError(errmsg) + + if isinstance(grouping_vector, np.ndarray): + if grouping_vector.dtype.kind in "mM": + # if we have a date/time-like grouper, make sure that we have + # Timestamps like + # TODO 2022-10-08 we only have one test that gets here and + # values are already in nanoseconds in that case. + grouping_vector = Series(grouping_vector).to_numpy() + elif isinstance(getattr(grouping_vector, "dtype", None), CategoricalDtype): + # a passed Categorical + self._orig_cats = grouping_vector.categories + grouping_vector, self._all_grouper = recode_for_groupby( + grouping_vector, sort, observed + ) + + self.grouping_vector = grouping_vector + + def __repr__(self) -> str: + return f"Grouping({self.name})" + + def __iter__(self) -> Iterator: + return iter(self.indices) + + @cache_readonly + def _passed_categorical(self) -> bool: + dtype = getattr(self.grouping_vector, "dtype", None) + return isinstance(dtype, CategoricalDtype) + + @cache_readonly + def name(self) -> Hashable: + ilevel = self._ilevel + if ilevel is not None: + return self._index.names[ilevel] + + if isinstance(self._orig_grouper, (Index, Series)): + return self._orig_grouper.name + + elif isinstance(self.grouping_vector, ops.BaseGrouper): + return self.grouping_vector.result_index.name + + elif isinstance(self.grouping_vector, Index): + return self.grouping_vector.name + + # otherwise we have ndarray or ExtensionArray -> no name + return None + + @cache_readonly + def _ilevel(self) -> int | None: + """ + If necessary, converted index level name to index level position. + """ + level = self.level + if level is None: + return None + if not isinstance(level, int): + index = self._index + if level not in index.names: + raise AssertionError(f"Level {level} not in index") + return index.names.index(level) + return level + + @property + def ngroups(self) -> int: + return len(self._group_index) + + @cache_readonly + def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: + # we have a list of groupers + if isinstance(self.grouping_vector, ops.BaseGrouper): + return self.grouping_vector.indices + + values = Categorical(self.grouping_vector) + return values._reverse_indexer() + + @property + def codes(self) -> npt.NDArray[np.signedinteger]: + return self._codes_and_uniques[0] + + @cache_readonly + def _group_arraylike(self) -> ArrayLike: + """ + Analogous to result_index, but holding an ArrayLike to ensure + we can retain ExtensionDtypes. 
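# --- Illustrative sketch (editor's note, not part of the pandas source) ---
# Observable effect of the dtype retention described above: grouping by a
# Categorical with observed=False keeps unobserved categories in the result
# index and preserves the categorical dtype of the key.
import pandas as pd

key = pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"])
df = pd.DataFrame({"key": key, "val": [1, 2, 3]})
out = df.groupby("key", observed=False).sum()
out.index   # CategoricalIndex(['a', 'b', 'c'], ...) -- unobserved 'c' kept (sum 0)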
+ """ + if self._all_grouper is not None: + # retain dtype for categories, including unobserved ones + return self._result_index._values + + elif self._passed_categorical: + return self._group_index._values + + return self._codes_and_uniques[1] + + @property + def group_arraylike(self) -> ArrayLike: + """ + Analogous to result_index, but holding an ArrayLike to ensure + we can retain ExtensionDtypes. + """ + warnings.warn( + "group_arraylike is deprecated and will be removed in a future " + "version of pandas", + category=FutureWarning, + stacklevel=find_stack_level(), + ) + return self._group_arraylike + + @cache_readonly + def _result_index(self) -> Index: + # result_index retains dtype for categories, including unobserved ones, + # which group_index does not + if self._all_grouper is not None: + group_idx = self._group_index + assert isinstance(group_idx, CategoricalIndex) + cats = self._orig_cats + # set_categories is dynamically added + return group_idx.set_categories(cats) # type: ignore[attr-defined] + return self._group_index + + @property + def result_index(self) -> Index: + warnings.warn( + "result_index is deprecated and will be removed in a future " + "version of pandas", + category=FutureWarning, + stacklevel=find_stack_level(), + ) + return self._result_index + + @cache_readonly + def _group_index(self) -> Index: + codes, uniques = self._codes_and_uniques + if not self._dropna and self._passed_categorical: + assert isinstance(uniques, Categorical) + if self._sort and (codes == len(uniques)).any(): + # Add NA value on the end when sorting + uniques = Categorical.from_codes( + np.append(uniques.codes, [-1]), uniques.categories, validate=False + ) + elif len(codes) > 0: + # Need to determine proper placement of NA value when not sorting + cat = self.grouping_vector + na_idx = (cat.codes < 0).argmax() + if cat.codes[na_idx] < 0: + # count number of unique codes that comes before the nan value + na_unique_idx = algorithms.nunique_ints(cat.codes[:na_idx]) + new_codes = np.insert(uniques.codes, na_unique_idx, -1) + uniques = Categorical.from_codes( + new_codes, uniques.categories, validate=False + ) + return Index._with_infer(uniques, name=self.name) + + @property + def group_index(self) -> Index: + warnings.warn( + "group_index is deprecated and will be removed in a future " + "version of pandas", + category=FutureWarning, + stacklevel=find_stack_level(), + ) + return self._group_index + + @cache_readonly + def _codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]: + uniques: ArrayLike + if self._passed_categorical: + # we make a CategoricalIndex out of the cat grouper + # preserving the categories / ordered attributes; + # doesn't (yet - GH#46909) handle dropna=False + cat = self.grouping_vector + categories = cat.categories + + if self._observed: + ucodes = algorithms.unique1d(cat.codes) + ucodes = ucodes[ucodes != -1] + if self._sort: + ucodes = np.sort(ucodes) + else: + ucodes = np.arange(len(categories)) + + uniques = Categorical.from_codes( + codes=ucodes, categories=categories, ordered=cat.ordered, validate=False + ) + + codes = cat.codes + if not self._dropna: + na_mask = codes < 0 + if np.any(na_mask): + if self._sort: + # Replace NA codes with `largest code + 1` + na_code = len(categories) + codes = np.where(na_mask, na_code, codes) + else: + # Insert NA code into the codes based on first appearance + # A negative code must exist, no need to check codes[na_idx] < 0 + na_idx = na_mask.argmax() + # count number of unique codes that comes before the nan 
value + na_code = algorithms.nunique_ints(codes[:na_idx]) + codes = np.where(codes >= na_code, codes + 1, codes) + codes = np.where(na_mask, na_code, codes) + + if not self._observed: + uniques = uniques.reorder_categories(self._orig_cats) + + return codes, uniques + + elif isinstance(self.grouping_vector, ops.BaseGrouper): + # we have a list of groupers + codes = self.grouping_vector.codes_info + uniques = self.grouping_vector.result_index._values + elif self._uniques is not None: + # GH#50486 Code grouping_vector using _uniques; allows + # including uniques that are not present in grouping_vector. + cat = Categorical(self.grouping_vector, categories=self._uniques) + codes = cat.codes + uniques = self._uniques + else: + # GH35667, replace dropna=False with use_na_sentinel=False + # error: Incompatible types in assignment (expression has type "Union[ + # ndarray[Any, Any], Index]", variable has type "Categorical") + codes, uniques = algorithms.factorize( # type: ignore[assignment] + self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna + ) + return codes, uniques + + @cache_readonly + def groups(self) -> dict[Hashable, np.ndarray]: + cats = Categorical.from_codes(self.codes, self._group_index, validate=False) + return self._index.groupby(cats) + + +def get_grouper( + obj: NDFrameT, + key=None, + axis: Axis = 0, + level=None, + sort: bool = True, + observed: bool = False, + validate: bool = True, + dropna: bool = True, +) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]: + """ + Create and return a BaseGrouper, which is an internal + mapping of how to create the grouper indexers. + This may be composed of multiple Grouping objects, indicating + multiple groupers + + Groupers are ultimately index mappings. They can originate as: + index mappings, keys to columns, functions, or Groupers + + Groupers enable local references to axis,level,sort, while + the passed in axis, level, and sort are 'global'. + + This routine tries to figure out what the passing in references + are and then creates a Grouping for each one, combined into + a BaseGrouper. + + If observed & we have a categorical grouper, only show the observed + values. + + If validate, then check for key/level overlaps. + + """ + group_axis = obj._get_axis(axis) + + # validate that the passed single level is compatible with the passed + # axis of the object + if level is not None: + # TODO: These if-block and else-block are almost same. + # MultiIndex instance check is removable, but it seems that there are + # some processes only for non-MultiIndex in else-block, + # eg. `obj.index.name != level`. We have to consider carefully whether + # these are applicable for MultiIndex. Even if these are applicable, + # we need to check if it makes no side effect to subsequent processes + # on the outside of this condition. 
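# --- Illustrative sketch (editor's note, not part of the pandas source) ---
# The public groupby entry points that funnel into get_grouper() accept the
# key forms listed in the docstring above: column labels, Series/arrays,
# callables mapped over the index, and Grouper objects.
import pandas as pd

df = pd.DataFrame(
    {"A": ["x", "y", "x"], "B": [1, 2, 3]},
    index=pd.date_range("2023-01-01", periods=3),
)
df.groupby("A")["B"].sum()                    # key -> column label
df.groupby(df["A"])["B"].sum()                # key -> Series of labels
df.groupby(lambda ts: ts.month)["B"].sum()    # key -> callable over the index
df.groupby(pd.Grouper(freq="2D"))["B"].sum()  # key -> Grouper (time binning)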
+ # (GH 17621) + if isinstance(group_axis, MultiIndex): + if is_list_like(level) and len(level) == 1: + level = level[0] + + if key is None and is_scalar(level): + # Get the level values from group_axis + key = group_axis.get_level_values(level) + level = None + + else: + # allow level to be a length-one list-like object + # (e.g., level=[0]) + # GH 13901 + if is_list_like(level): + nlevels = len(level) + if nlevels == 1: + level = level[0] + elif nlevels == 0: + raise ValueError("No group keys passed!") + else: + raise ValueError("multiple levels only valid with MultiIndex") + + if isinstance(level, str): + if obj._get_axis(axis).name != level: + raise ValueError( + f"level name {level} is not the name " + f"of the {obj._get_axis_name(axis)}" + ) + elif level > 0 or level < -1: + raise ValueError("level > 0 or level < -1 only valid with MultiIndex") + + # NOTE: `group_axis` and `group_axis.get_level_values(level)` + # are same in this section. + level = None + key = group_axis + + # a passed-in Grouper, directly convert + if isinstance(key, Grouper): + grouper, obj = key._get_grouper(obj, validate=False) + if key.key is None: + return grouper, frozenset(), obj + else: + return grouper, frozenset({key.key}), obj + + # already have a BaseGrouper, just return it + elif isinstance(key, ops.BaseGrouper): + return key, frozenset(), obj + + if not isinstance(key, list): + keys = [key] + match_axis_length = False + else: + keys = key + match_axis_length = len(keys) == len(group_axis) + + # what are we after, exactly? + any_callable = any(callable(g) or isinstance(g, dict) for g in keys) + any_groupers = any(isinstance(g, (Grouper, Grouping)) for g in keys) + any_arraylike = any( + isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys + ) + + # is this an index replacement? + if ( + not any_callable + and not any_arraylike + and not any_groupers + and match_axis_length + and level is None + ): + if isinstance(obj, DataFrame): + all_in_columns_index = all( + g in obj.columns or g in obj.index.names for g in keys + ) + else: + assert isinstance(obj, Series) + all_in_columns_index = all(g in obj.index.names for g in keys) + + if not all_in_columns_index: + keys = [com.asarray_tuplesafe(keys)] + + if isinstance(level, (tuple, list)): + if key is None: + keys = [None] * len(level) + levels = level + else: + levels = [level] * len(keys) + + groupings: list[Grouping] = [] + exclusions: set[Hashable] = set() + + # if the actual grouper should be obj[key] + def is_in_axis(key) -> bool: + if not _is_label_like(key): + if obj.ndim == 1: + return False + + # items -> .columns for DataFrame, .index for Series + items = obj.axes[-1] + try: + items.get_loc(key) + except (KeyError, TypeError, InvalidIndexError): + # TypeError shows up here if we pass e.g. 
an Index + return False + + return True + + # if the grouper is obj[name] + def is_in_obj(gpr) -> bool: + if not hasattr(gpr, "name"): + return False + if using_copy_on_write() or warn_copy_on_write(): + # For the CoW case, we check the references to determine if the + # series is part of the object + try: + obj_gpr_column = obj[gpr.name] + except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime): + return False + if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series): + return gpr._mgr.references_same_values( # type: ignore[union-attr] + obj_gpr_column._mgr, 0 # type: ignore[arg-type] + ) + return False + try: + return gpr is obj[gpr.name] + except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime): + # IndexError reached in e.g. test_skip_group_keys when we pass + # lambda here + # InvalidIndexError raised on key-types inappropriate for index, + # e.g. DatetimeIndex.get_loc(tuple()) + # OutOfBoundsDatetime raised when obj is a Series with DatetimeIndex + # and gpr.name is month str + return False + + for gpr, level in zip(keys, levels): + if is_in_obj(gpr): # df.groupby(df['name']) + in_axis = True + exclusions.add(gpr.name) + + elif is_in_axis(gpr): # df.groupby('name') + if obj.ndim != 1 and gpr in obj: + if validate: + obj._check_label_or_level_ambiguity(gpr, axis=axis) + in_axis, name, gpr = True, gpr, obj[gpr] + if gpr.ndim != 1: + # non-unique columns; raise here to get the name in the + # exception message + raise ValueError(f"Grouper for '{name}' not 1-dimensional") + exclusions.add(name) + elif obj._is_level_reference(gpr, axis=axis): + in_axis, level, gpr = False, gpr, None + else: + raise KeyError(gpr) + elif isinstance(gpr, Grouper) and gpr.key is not None: + # Add key to exclusions + exclusions.add(gpr.key) + in_axis = True + else: + in_axis = False + + # create the Grouping + # allow us to passing the actual Grouping as the gpr + ping = ( + Grouping( + group_axis, + gpr, + obj=obj, + level=level, + sort=sort, + observed=observed, + in_axis=in_axis, + dropna=dropna, + ) + if not isinstance(gpr, Grouping) + else gpr + ) + + groupings.append(ping) + + if len(groupings) == 0 and len(obj): + raise ValueError("No group keys passed!") + if len(groupings) == 0: + groupings.append(Grouping(Index([], dtype="int"), np.array([], dtype=np.intp))) + + # create the internals grouper + grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna) + return grouper, frozenset(exclusions), obj + + +def _is_label_like(val) -> bool: + return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val)) + + +def _convert_grouper(axis: Index, grouper): + if isinstance(grouper, dict): + return grouper.get + elif isinstance(grouper, Series): + if grouper.index.equals(axis): + return grouper._values + else: + return grouper.reindex(axis)._values + elif isinstance(grouper, MultiIndex): + return grouper._values + elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)): + if len(grouper) != len(axis): + raise ValueError("Grouper and axis must be same length") + + if isinstance(grouper, (list, tuple)): + grouper = com.asarray_tuplesafe(grouper) + return grouper + else: + return grouper diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/indexing.py b/venv/lib/python3.10/site-packages/pandas/core/groupby/indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..a3c5ab8edc94e4f91175891282252d0e8cdfd3ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/groupby/indexing.py @@ -0,0 
+1,304 @@ +from __future__ import annotations + +from collections.abc import Iterable +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) + +import numpy as np + +from pandas.util._decorators import ( + cache_readonly, + doc, +) + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) + +if TYPE_CHECKING: + from pandas._typing import PositionalIndexer + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.groupby import groupby + + +class GroupByIndexingMixin: + """ + Mixin for adding ._positional_selector to GroupBy. + """ + + @cache_readonly + def _positional_selector(self) -> GroupByPositionalSelector: + """ + Return positional selection for each group. + + ``groupby._positional_selector[i:j]`` is similar to + ``groupby.apply(lambda x: x.iloc[i:j])`` + but much faster and preserves the original index and order. + + ``_positional_selector[]`` is compatible with and extends :meth:`~GroupBy.head` + and :meth:`~GroupBy.tail`. For example: + + - ``head(5)`` + - ``_positional_selector[5:-5]`` + - ``tail(5)`` + + together return all the rows. + + Allowed inputs for the index are: + + - An integer valued iterable, e.g. ``range(2, 4)``. + - A comma separated list of integers and slices, e.g. ``5``, ``2, 4``, ``2:4``. + + The output format is the same as :meth:`~GroupBy.head` and + :meth:`~GroupBy.tail`, namely + a subset of the ``DataFrame`` or ``Series`` with the index and order preserved. + + Returns + ------- + Series + The filtered subset of the original Series. + DataFrame + The filtered subset of the original DataFrame. + + See Also + -------- + DataFrame.iloc : Purely integer-location based indexing for selection by + position. + GroupBy.head : Return first n rows of each group. + GroupBy.tail : Return last n rows of each group. + GroupBy.nth : Take the nth row from each group if n is an int, or a + subset of rows, if n is a list of ints. + + Notes + ----- + - The slice step cannot be negative. + - If the index specification results in overlaps, the item is not duplicated. + - If the index specification changes the order of items, then + they are returned in their original order. + By contrast, ``DataFrame.iloc`` can change the row order. + - ``groupby()`` parameters such as as_index and dropna are ignored. + + The differences between ``_positional_selector[]`` and :meth:`~GroupBy.nth` + with ``as_index=False`` are: + + - Input to ``_positional_selector`` can include + one or more slices whereas ``nth`` + just handles an integer or a list of integers. + - ``_positional_selector`` can accept a slice relative to the + last row of each group. + - ``_positional_selector`` does not have an equivalent to the + ``nth()`` ``dropna`` parameter. + + Examples + -------- + >>> df = pd.DataFrame([["a", 1], ["a", 2], ["a", 3], ["b", 4], ["b", 5]], + ... 
columns=["A", "B"]) + >>> df.groupby("A")._positional_selector[1:2] + A B + 1 a 2 + 4 b 5 + + >>> df.groupby("A")._positional_selector[1, -1] + A B + 1 a 2 + 2 a 3 + 4 b 5 + """ + if TYPE_CHECKING: + # pylint: disable-next=used-before-assignment + groupby_self = cast(groupby.GroupBy, self) + else: + groupby_self = self + + return GroupByPositionalSelector(groupby_self) + + def _make_mask_from_positional_indexer( + self, + arg: PositionalIndexer | tuple, + ) -> np.ndarray: + if is_list_like(arg): + if all(is_integer(i) for i in cast(Iterable, arg)): + mask = self._make_mask_from_list(cast(Iterable[int], arg)) + else: + mask = self._make_mask_from_tuple(cast(tuple, arg)) + + elif isinstance(arg, slice): + mask = self._make_mask_from_slice(arg) + elif is_integer(arg): + mask = self._make_mask_from_int(cast(int, arg)) + else: + raise TypeError( + f"Invalid index {type(arg)}. " + "Must be integer, list-like, slice or a tuple of " + "integers and slices" + ) + + if isinstance(mask, bool): + if mask: + mask = self._ascending_count >= 0 + else: + mask = self._ascending_count < 0 + + return cast(np.ndarray, mask) + + def _make_mask_from_int(self, arg: int) -> np.ndarray: + if arg >= 0: + return self._ascending_count == arg + else: + return self._descending_count == (-arg - 1) + + def _make_mask_from_list(self, args: Iterable[int]) -> bool | np.ndarray: + positive = [arg for arg in args if arg >= 0] + negative = [-arg - 1 for arg in args if arg < 0] + + mask: bool | np.ndarray = False + + if positive: + mask |= np.isin(self._ascending_count, positive) + + if negative: + mask |= np.isin(self._descending_count, negative) + + return mask + + def _make_mask_from_tuple(self, args: tuple) -> bool | np.ndarray: + mask: bool | np.ndarray = False + + for arg in args: + if is_integer(arg): + mask |= self._make_mask_from_int(cast(int, arg)) + elif isinstance(arg, slice): + mask |= self._make_mask_from_slice(arg) + else: + raise ValueError( + f"Invalid argument {type(arg)}. Should be int or slice." + ) + + return mask + + def _make_mask_from_slice(self, arg: slice) -> bool | np.ndarray: + start = arg.start + stop = arg.stop + step = arg.step + + if step is not None and step < 0: + raise ValueError(f"Invalid step {step}. 
Must be non-negative") + + mask: bool | np.ndarray = True + + if step is None: + step = 1 + + if start is None: + if step > 1: + mask &= self._ascending_count % step == 0 + + elif start >= 0: + mask &= self._ascending_count >= start + + if step > 1: + mask &= (self._ascending_count - start) % step == 0 + + else: + mask &= self._descending_count < -start + + offset_array = self._descending_count + start + 1 + limit_array = ( + self._ascending_count + self._descending_count + (start + 1) + ) < 0 + offset_array = np.where(limit_array, self._ascending_count, offset_array) + + mask &= offset_array % step == 0 + + if stop is not None: + if stop >= 0: + mask &= self._ascending_count < stop + else: + mask &= self._descending_count >= -stop + + return mask + + @cache_readonly + def _ascending_count(self) -> np.ndarray: + if TYPE_CHECKING: + groupby_self = cast(groupby.GroupBy, self) + else: + groupby_self = self + + return groupby_self._cumcount_array() + + @cache_readonly + def _descending_count(self) -> np.ndarray: + if TYPE_CHECKING: + groupby_self = cast(groupby.GroupBy, self) + else: + groupby_self = self + + return groupby_self._cumcount_array(ascending=False) + + +@doc(GroupByIndexingMixin._positional_selector) +class GroupByPositionalSelector: + def __init__(self, groupby_object: groupby.GroupBy) -> None: + self.groupby_object = groupby_object + + def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series: + """ + Select by positional index per group. + + Implements GroupBy._positional_selector + + Parameters + ---------- + arg : PositionalIndexer | tuple + Allowed values are: + - int + - int valued iterable such as list or range + - slice with step either None or positive + - tuple of integers and slices + + Returns + ------- + Series + The filtered subset of the original groupby Series. + DataFrame + The filtered subset of the original groupby DataFrame. + + See Also + -------- + DataFrame.iloc : Integer-location based indexing for selection by position. + GroupBy.head : Return first n rows of each group. + GroupBy.tail : Return last n rows of each group. + GroupBy._positional_selector : Return positional selection for each group. + GroupBy.nth : Take the nth row from each group if n is an int, or a + subset of rows, if n is a list of ints. 
+ """ + mask = self.groupby_object._make_mask_from_positional_indexer(arg) + return self.groupby_object._mask_selected_obj(mask) + + +class GroupByNthSelector: + """ + Dynamically substituted for GroupBy.nth to enable both call and index + """ + + def __init__(self, groupby_object: groupby.GroupBy) -> None: + self.groupby_object = groupby_object + + def __call__( + self, + n: PositionalIndexer | tuple, + dropna: Literal["any", "all", None] = None, + ) -> DataFrame | Series: + return self.groupby_object._nth(n, dropna) + + def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series: + return self.groupby_object._nth(n) diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/numba_.py b/venv/lib/python3.10/site-packages/pandas/core/groupby/numba_.py new file mode 100644 index 0000000000000000000000000000000000000000..3b7a58e87603e578216c4c80e8c88e06828d5dfa --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/groupby/numba_.py @@ -0,0 +1,181 @@ +"""Common utilities for Numba operations with groupby ops""" +from __future__ import annotations + +import functools +import inspect +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.util.numba_ import ( + NumbaUtilError, + jit_user_function, +) + +if TYPE_CHECKING: + from pandas._typing import Scalar + + +def validate_udf(func: Callable) -> None: + """ + Validate user defined function for ops when using Numba with groupby ops. + + The first signature arguments should include: + + def f(values, index, ...): + ... + + Parameters + ---------- + func : function, default False + user defined function + + Returns + ------- + None + + Raises + ------ + NumbaUtilError + """ + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." + ) + udf_signature = list(inspect.signature(func).parameters.keys()) + expected_args = ["values", "index"] + min_number_args = len(expected_args) + if ( + len(udf_signature) < min_number_args + or udf_signature[:min_number_args] != expected_args + ): + raise NumbaUtilError( + f"The first {min_number_args} arguments to {func.__name__} must be " + f"{expected_args}" + ) + + +@functools.cache +def generate_numba_agg_func( + func: Callable[..., Scalar], + nopython: bool, + nogil: bool, + parallel: bool, +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: + """ + Generate a numba jitted agg function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a groupby agg function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the groupby evaluation loop. 
+ + Parameters + ---------- + func : function + function to be applied to each group and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_agg( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_columns: int, + *args: Any, + ) -> np.ndarray: + assert len(begin) == len(end) + num_groups = len(begin) + + result = np.empty((num_groups, num_columns)) + for i in numba.prange(num_groups): + group_index = index[begin[i] : end[i]] + for j in numba.prange(num_columns): + group = values[begin[i] : end[i], j] + result[i, j] = numba_func(group, group_index, *args) + return result + + return group_agg + + +@functools.cache +def generate_numba_transform_func( + func: Callable[..., np.ndarray], + nopython: bool, + nogil: bool, + parallel: bool, +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: + """ + Generate a numba jitted transform function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a groupby transform function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the groupby evaluation loop. + + Parameters + ---------- + func : function + function to be applied to each window and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_transform( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_columns: int, + *args: Any, + ) -> np.ndarray: + assert len(begin) == len(end) + num_groups = len(begin) + + result = np.empty((len(values), num_columns)) + for i in numba.prange(num_groups): + group_index = index[begin[i] : end[i]] + for j in numba.prange(num_columns): + group = values[begin[i] : end[i], j] + result[begin[i] : end[i], j] = numba_func(group, group_index, *args) + return result + + return group_transform diff --git a/venv/lib/python3.10/site-packages/pandas/core/groupby/ops.py b/venv/lib/python3.10/site-packages/pandas/core/groupby/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..e2ddf9aa5c0c1752e1de9b81e2e72873db050e85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/groupby/ops.py @@ -0,0 +1,1208 @@ +""" +Provide classes to perform the groupby aggregate operations. + +These are not exposed to the user and provide implementations of the grouping +operations, primarily in cython. These classes (BaseGrouper and BinGrouper) +are contained *in* the SeriesGroupBy and DataFrameGroupBy objects. 
+""" +from __future__ import annotations + +import collections +import functools +from typing import ( + TYPE_CHECKING, + Callable, + Generic, + final, +) + +import numpy as np + +from pandas._libs import ( + NaT, + lib, +) +import pandas._libs.groupby as libgroupby +from pandas._typing import ( + ArrayLike, + AxisInt, + NDFrameT, + Shape, + npt, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.cast import ( + maybe_cast_pointwise_result, + maybe_downcast_to_dtype, +) +from pandas.core.dtypes.common import ( + ensure_float64, + ensure_int64, + ensure_platform_int, + ensure_uint64, + is_1d_only_ea_dtype, +) +from pandas.core.dtypes.missing import ( + isna, + maybe_fill, +) + +from pandas.core.frame import DataFrame +from pandas.core.groupby import grouper +from pandas.core.indexes.api import ( + CategoricalIndex, + Index, + MultiIndex, + ensure_index, +) +from pandas.core.series import Series +from pandas.core.sorting import ( + compress_group_index, + decons_obs_group_ids, + get_flattened_list, + get_group_index, + get_group_index_sorter, + get_indexer_dict, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + Sequence, + ) + + from pandas.core.generic import NDFrame + + +def check_result_array(obj, dtype) -> None: + # Our operation is supposed to be an aggregation/reduction. If + # it returns an ndarray, this likely means an invalid operation has + # been passed. See test_apply_without_aggregation, test_agg_must_agg + if isinstance(obj, np.ndarray): + if dtype != object: + # If it is object dtype, the function can be a reduction/aggregation + # and still return an ndarray e.g. test_agg_over_numpy_arrays + raise ValueError("Must produce aggregated value") + + +def extract_result(res): + """ + Extract the result object, it might be a 0-dim ndarray + or a len-1 0-dim, or a scalar + """ + if hasattr(res, "_values"): + # Preserve EA + res = res._values + if res.ndim == 1 and len(res) == 1: + # see test_agg_lambda_with_timezone, test_resampler_grouper.py::test_apply + res = res[0] + return res + + +class WrappedCythonOp: + """ + Dispatch logic for functions defined in _libs.groupby + + Parameters + ---------- + kind: str + Whether the operation is an aggregate or transform. + how: str + Operation name, e.g. "mean". + has_dropped_na: bool + True precisely when dropna=True and the grouper contains a null value. + """ + + # Functions for which we do _not_ attempt to cast the cython result + # back to the original dtype. 
+ cast_blocklist = frozenset( + ["any", "all", "rank", "count", "size", "idxmin", "idxmax"] + ) + + def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None: + self.kind = kind + self.how = how + self.has_dropped_na = has_dropped_na + + _CYTHON_FUNCTIONS: dict[str, dict] = { + "aggregate": { + "any": functools.partial(libgroupby.group_any_all, val_test="any"), + "all": functools.partial(libgroupby.group_any_all, val_test="all"), + "sum": "group_sum", + "prod": "group_prod", + "idxmin": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmin"), + "idxmax": functools.partial(libgroupby.group_idxmin_idxmax, name="idxmax"), + "min": "group_min", + "max": "group_max", + "mean": "group_mean", + "median": "group_median_float64", + "var": "group_var", + "std": functools.partial(libgroupby.group_var, name="std"), + "sem": functools.partial(libgroupby.group_var, name="sem"), + "skew": "group_skew", + "first": "group_nth", + "last": "group_last", + "ohlc": "group_ohlc", + }, + "transform": { + "cumprod": "group_cumprod", + "cumsum": "group_cumsum", + "cummin": "group_cummin", + "cummax": "group_cummax", + "rank": "group_rank", + }, + } + + _cython_arity = {"ohlc": 4} # OHLC + + @classmethod + def get_kind_from_how(cls, how: str) -> str: + if how in cls._CYTHON_FUNCTIONS["aggregate"]: + return "aggregate" + return "transform" + + # Note: we make this a classmethod and pass kind+how so that caching + # works at the class level and not the instance level + @classmethod + @functools.cache + def _get_cython_function( + cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool + ): + dtype_str = dtype.name + ftype = cls._CYTHON_FUNCTIONS[kind][how] + + # see if there is a fused-type version of function + # only valid for numeric + if callable(ftype): + f = ftype + else: + f = getattr(libgroupby, ftype) + if is_numeric: + return f + elif dtype == np.dtype(object): + if how in ["median", "cumprod"]: + # no fused types -> no __signatures__ + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) + elif how in ["std", "sem", "idxmin", "idxmax"]: + # We have a partial object that does not have __signatures__ + return f + elif how == "skew": + # _get_cython_vals will convert to float64 + pass + elif "object" not in f.__signatures__: + # raise NotImplementedError here rather than TypeError later + raise NotImplementedError( + f"function is not implemented for this dtype: " + f"[how->{how},dtype->{dtype_str}]" + ) + return f + else: + raise NotImplementedError( + "This should not be reached. Please report a bug at " + "github.com/pandas-dev/pandas/", + dtype, + ) + + def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: + """ + Cast numeric dtypes to float64 for functions that only support that. 
+ + Parameters + ---------- + values : np.ndarray + + Returns + ------- + values : np.ndarray + """ + how = self.how + + if how in ["median", "std", "sem", "skew"]: + # median only has a float64 implementation + # We should only get here with is_numeric, as non-numeric cases + # should raise in _get_cython_function + values = ensure_float64(values) + + elif values.dtype.kind in "iu": + if how in ["var", "mean"] or ( + self.kind == "transform" and self.has_dropped_na + ): + # has_dropped_na check need for test_null_group_str_transformer + # result may still include NaN, so we have to cast + values = ensure_float64(values) + + elif how in ["sum", "ohlc", "prod", "cumsum", "cumprod"]: + # Avoid overflow during group op + if values.dtype.kind == "i": + values = ensure_int64(values) + else: + values = ensure_uint64(values) + + return values + + def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape: + how = self.how + kind = self.kind + + arity = self._cython_arity.get(how, 1) + + out_shape: Shape + if how == "ohlc": + out_shape = (ngroups, arity) + elif arity > 1: + raise NotImplementedError( + "arity of more than 1 is not supported for the 'how' argument" + ) + elif kind == "transform": + out_shape = values.shape + else: + out_shape = (ngroups,) + values.shape[1:] + return out_shape + + def _get_out_dtype(self, dtype: np.dtype) -> np.dtype: + how = self.how + + if how == "rank": + out_dtype = "float64" + elif how in ["idxmin", "idxmax"]: + # The Cython implementation only produces the row number; we'll take + # from the index using this in post processing + out_dtype = "intp" + else: + if dtype.kind in "iufcb": + out_dtype = f"{dtype.kind}{dtype.itemsize}" + else: + out_dtype = "object" + return np.dtype(out_dtype) + + def _get_result_dtype(self, dtype: np.dtype) -> np.dtype: + """ + Get the desired dtype of a result based on the + input dtype and how it was computed. + + Parameters + ---------- + dtype : np.dtype + + Returns + ------- + np.dtype + The desired dtype of the result. 
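# --- Illustrative sketch (editor's note, not part of the pandas source) ---
# The result-dtype handling around _get_result_dtype is visible in ordinary use:
import pandas as pd

s = pd.Series([True, False, True], index=["a", "a", "b"])
s.groupby(level=0).sum().dtype    # int64   -- bool sums are upcast
t = pd.Series([1, 2, 3], index=["a", "a", "b"])
t.groupby(level=0).mean().dtype   # float64 -- integer means become floats
t.groupby(level=0).max().dtype    # int64   -- min/max keep the input dtype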
+ """ + how = self.how + + if how in ["sum", "cumsum", "sum", "prod", "cumprod"]: + if dtype == np.dtype(bool): + return np.dtype(np.int64) + elif how in ["mean", "median", "var", "std", "sem"]: + if dtype.kind in "fc": + return dtype + elif dtype.kind in "iub": + return np.dtype(np.float64) + return dtype + + @final + def _cython_op_ndim_compat( + self, + values: np.ndarray, + *, + min_count: int, + ngroups: int, + comp_ids: np.ndarray, + mask: npt.NDArray[np.bool_] | None = None, + result_mask: npt.NDArray[np.bool_] | None = None, + **kwargs, + ) -> np.ndarray: + if values.ndim == 1: + # expand to 2d, dispatch, then squeeze if appropriate + values2d = values[None, :] + if mask is not None: + mask = mask[None, :] + if result_mask is not None: + result_mask = result_mask[None, :] + res = self._call_cython_op( + values2d, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + if res.shape[0] == 1: + return res[0] + + # otherwise we have OHLC + return res.T + + return self._call_cython_op( + values, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + + @final + def _call_cython_op( + self, + values: np.ndarray, # np.ndarray[ndim=2] + *, + min_count: int, + ngroups: int, + comp_ids: np.ndarray, + mask: npt.NDArray[np.bool_] | None, + result_mask: npt.NDArray[np.bool_] | None, + **kwargs, + ) -> np.ndarray: # np.ndarray[ndim=2] + orig_values = values + + dtype = values.dtype + is_numeric = dtype.kind in "iufcb" + + is_datetimelike = dtype.kind in "mM" + + if is_datetimelike: + values = values.view("int64") + is_numeric = True + elif dtype.kind == "b": + values = values.view("uint8") + if values.dtype == "float16": + values = values.astype(np.float32) + + if self.how in ["any", "all"]: + if mask is None: + mask = isna(values) + if dtype == object: + if kwargs["skipna"]: + # GH#37501: don't raise on pd.NA when skipna=True + if mask.any(): + # mask on original values computed separately + values = values.copy() + values[mask] = True + values = values.astype(bool, copy=False).view(np.int8) + is_numeric = True + + values = values.T + if mask is not None: + mask = mask.T + if result_mask is not None: + result_mask = result_mask.T + + out_shape = self._get_output_shape(ngroups, values) + func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric) + values = self._get_cython_vals(values) + out_dtype = self._get_out_dtype(values.dtype) + + result = maybe_fill(np.empty(out_shape, dtype=out_dtype)) + if self.kind == "aggregate": + counts = np.zeros(ngroups, dtype=np.int64) + if self.how in [ + "idxmin", + "idxmax", + "min", + "max", + "mean", + "last", + "first", + "sum", + ]: + func( + out=result, + counts=counts, + values=values, + labels=comp_ids, + min_count=min_count, + mask=mask, + result_mask=result_mask, + is_datetimelike=is_datetimelike, + **kwargs, + ) + elif self.how in ["sem", "std", "var", "ohlc", "prod", "median"]: + if self.how in ["std", "sem"]: + kwargs["is_datetimelike"] = is_datetimelike + func( + result, + counts, + values, + comp_ids, + min_count=min_count, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + elif self.how in ["any", "all"]: + func( + out=result, + values=values, + labels=comp_ids, + mask=mask, + result_mask=result_mask, + **kwargs, + ) + result = result.astype(bool, copy=False) + elif self.how in ["skew"]: + func( + out=result, + counts=counts, + values=values, + labels=comp_ids, + mask=mask, + 
result_mask=result_mask, + **kwargs, + ) + if dtype == object: + result = result.astype(object) + + else: + raise NotImplementedError(f"{self.how} is not implemented") + else: + # TODO: min_count + if self.how != "rank": + # TODO: should rank take result_mask? + kwargs["result_mask"] = result_mask + func( + out=result, + values=values, + labels=comp_ids, + ngroups=ngroups, + is_datetimelike=is_datetimelike, + mask=mask, + **kwargs, + ) + + if self.kind == "aggregate" and self.how not in ["idxmin", "idxmax"]: + # i.e. counts is defined. Locations where count None: + if values.ndim > 2: + raise NotImplementedError("number of dimensions is currently limited to 2") + if values.ndim == 2: + assert axis == 1, axis + elif not is_1d_only_ea_dtype(values.dtype): + # Note: it is *not* the case that axis is always 0 for 1-dim values, + # as we can have 1D ExtensionArrays that we need to treat as 2D + assert axis == 0 + + @final + def cython_operation( + self, + *, + values: ArrayLike, + axis: AxisInt, + min_count: int = -1, + comp_ids: np.ndarray, + ngroups: int, + **kwargs, + ) -> ArrayLike: + """ + Call our cython function, with appropriate pre- and post- processing. + """ + self._validate_axis(axis, values) + + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + return values._groupby_op( + how=self.how, + has_dropped_na=self.has_dropped_na, + min_count=min_count, + ngroups=ngroups, + ids=comp_ids, + **kwargs, + ) + + return self._cython_op_ndim_compat( + values, + min_count=min_count, + ngroups=ngroups, + comp_ids=comp_ids, + mask=None, + **kwargs, + ) + + +class BaseGrouper: + """ + This is an internal Grouper class, which actually holds + the generated groups + + Parameters + ---------- + axis : Index + groupings : Sequence[Grouping] + all the grouping instances to handle in this grouper + for example for grouper list to groupby, need to pass the list + sort : bool, default True + whether this grouper will give sorted result or not + + """ + + axis: Index + + def __init__( + self, + axis: Index, + groupings: Sequence[grouper.Grouping], + sort: bool = True, + dropna: bool = True, + ) -> None: + assert isinstance(axis, Index), axis + + self.axis = axis + self._groupings: list[grouper.Grouping] = list(groupings) + self._sort = sort + self.dropna = dropna + + @property + def groupings(self) -> list[grouper.Grouping]: + return self._groupings + + @property + def shape(self) -> Shape: + return tuple(ping.ngroups for ping in self.groupings) + + def __iter__(self) -> Iterator[Hashable]: + return iter(self.indices) + + @property + def nkeys(self) -> int: + return len(self.groupings) + + def get_iterator( + self, data: NDFrameT, axis: AxisInt = 0 + ) -> Iterator[tuple[Hashable, NDFrameT]]: + """ + Groupby iterator + + Returns + ------- + Generator yielding sequence of (name, subsetted object) + for each group + """ + splitter = self._get_splitter(data, axis=axis) + keys = self.group_keys_seq + yield from zip(keys, splitter) + + @final + def _get_splitter(self, data: NDFrame, axis: AxisInt = 0) -> DataSplitter: + """ + Returns + ------- + Generator yielding subsetted objects + """ + ids, _, ngroups = self.group_info + return _get_splitter( + data, + ids, + ngroups, + sorted_ids=self._sorted_ids, + sort_idx=self._sort_idx, + axis=axis, + ) + + @final + @cache_readonly + def group_keys_seq(self): + if len(self.groupings) == 1: + return self.levels[0] + else: + ids, _, ngroups = self.group_info + + # provide "flattened" iterator for multi-group setting + return get_flattened_list(ids, ngroups, 
self.levels, self.codes) + + @cache_readonly + def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: + """dict {group name -> group indices}""" + if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex): + # This shows unused categories in indices GH#38642 + return self.groupings[0].indices + codes_list = [ping.codes for ping in self.groupings] + keys = [ping._group_index for ping in self.groupings] + return get_indexer_dict(codes_list, keys) + + @final + def result_ilocs(self) -> npt.NDArray[np.intp]: + """ + Get the original integer locations of result_index in the input. + """ + # Original indices are where group_index would go via sorting. + # But when dropna is true, we need to remove null values while accounting for + # any gaps that then occur because of them. + group_index = get_group_index( + self.codes, self.shape, sort=self._sort, xnull=True + ) + group_index, _ = compress_group_index(group_index, sort=self._sort) + + if self.has_dropped_na: + mask = np.where(group_index >= 0) + # Count how many gaps are caused by previous null values for each position + null_gaps = np.cumsum(group_index == -1)[mask] + group_index = group_index[mask] + + result = get_group_index_sorter(group_index, self.ngroups) + + if self.has_dropped_na: + # Shift by the number of prior null gaps + result += np.take(null_gaps, result) + + return result + + @final + @property + def codes(self) -> list[npt.NDArray[np.signedinteger]]: + return [ping.codes for ping in self.groupings] + + @property + def levels(self) -> list[Index]: + return [ping._group_index for ping in self.groupings] + + @property + def names(self) -> list[Hashable]: + return [ping.name for ping in self.groupings] + + @final + def size(self) -> Series: + """ + Compute group sizes. + """ + ids, _, ngroups = self.group_info + out: np.ndarray | list + if ngroups: + out = np.bincount(ids[ids != -1], minlength=ngroups) + else: + out = [] + return Series(out, index=self.result_index, dtype="int64", copy=False) + + @cache_readonly + def groups(self) -> dict[Hashable, np.ndarray]: + """dict {group name -> group labels}""" + if len(self.groupings) == 1: + return self.groupings[0].groups + else: + to_groupby = [] + for ping in self.groupings: + gv = ping.grouping_vector + if not isinstance(gv, BaseGrouper): + to_groupby.append(gv) + else: + to_groupby.append(gv.groupings[0].grouping_vector) + index = MultiIndex.from_arrays(to_groupby) + return self.axis.groupby(index) + + @final + @cache_readonly + def is_monotonic(self) -> bool: + # return if my group orderings are monotonic + return Index(self.group_info[0]).is_monotonic_increasing + + @final + @cache_readonly + def has_dropped_na(self) -> bool: + """ + Whether grouper has null value(s) that are dropped. 
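# --- Illustrative sketch (editor's note, not part of the pandas source) ---
# has_dropped_na above is what distinguishes these two user-level results:
import numpy as np
import pandas as pd

df = pd.DataFrame({"A": ["x", np.nan, "x"], "B": [1, 2, 3]})
df.groupby("A").size()                 # NaN key dropped: only the "x" group
df.groupby("A", dropna=False).size()   # NaN kept as its own group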
+ """ + return bool((self.group_info[0] < 0).any()) + + @cache_readonly + def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: + comp_ids, obs_group_ids = self._get_compressed_codes() + + ngroups = len(obs_group_ids) + comp_ids = ensure_platform_int(comp_ids) + + return comp_ids, obs_group_ids, ngroups + + @cache_readonly + def codes_info(self) -> npt.NDArray[np.intp]: + # return the codes of items in original grouped axis + ids, _, _ = self.group_info + return ids + + @final + def _get_compressed_codes( + self, + ) -> tuple[npt.NDArray[np.signedinteger], npt.NDArray[np.intp]]: + # The first returned ndarray may have any signed integer dtype + if len(self.groupings) > 1: + group_index = get_group_index(self.codes, self.shape, sort=True, xnull=True) + return compress_group_index(group_index, sort=self._sort) + # FIXME: compress_group_index's second return value is int64, not intp + + ping = self.groupings[0] + return ping.codes, np.arange(len(ping._group_index), dtype=np.intp) + + @final + @cache_readonly + def ngroups(self) -> int: + return len(self.result_index) + + @property + def reconstructed_codes(self) -> list[npt.NDArray[np.intp]]: + codes = self.codes + ids, obs_ids, _ = self.group_info + return decons_obs_group_ids(ids, obs_ids, self.shape, codes, xnull=True) + + @cache_readonly + def result_index(self) -> Index: + if len(self.groupings) == 1: + return self.groupings[0]._result_index.rename(self.names[0]) + + codes = self.reconstructed_codes + levels = [ping._result_index for ping in self.groupings] + return MultiIndex( + levels=levels, codes=codes, verify_integrity=False, names=self.names + ) + + @final + def get_group_levels(self) -> list[ArrayLike]: + # Note: only called from _insert_inaxis_grouper, which + # is only called for BaseGrouper, never for BinGrouper + if len(self.groupings) == 1: + return [self.groupings[0]._group_arraylike] + + name_list = [] + for ping, codes in zip(self.groupings, self.reconstructed_codes): + codes = ensure_platform_int(codes) + levels = ping._group_arraylike.take(codes) + + name_list.append(levels) + + return name_list + + # ------------------------------------------------------------ + # Aggregation functions + + @final + def _cython_operation( + self, + kind: str, + values, + how: str, + axis: AxisInt, + min_count: int = -1, + **kwargs, + ) -> ArrayLike: + """ + Returns the values of a cython operation. + """ + assert kind in ["transform", "aggregate"] + + cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na) + + ids, _, _ = self.group_info + ngroups = self.ngroups + return cy_op.cython_operation( + values=values, + axis=axis, + min_count=min_count, + comp_ids=ids, + ngroups=ngroups, + **kwargs, + ) + + @final + def agg_series( + self, obj: Series, func: Callable, preserve_dtype: bool = False + ) -> ArrayLike: + """ + Parameters + ---------- + obj : Series + func : function taking a Series and returning a scalar-like + preserve_dtype : bool + Whether the aggregation is known to be dtype-preserving. + + Returns + ------- + np.ndarray or ExtensionArray + """ + + if not isinstance(obj._values, np.ndarray): + # we can preserve a little bit more aggressively with EA dtype + # because maybe_cast_pointwise_result will do a try/except + # with _from_sequence. NB we are assuming here that _from_sequence + # is sufficiently strict that it casts appropriately. 
+ preserve_dtype = True + + result = self._aggregate_series_pure_python(obj, func) + + npvalues = lib.maybe_convert_objects(result, try_float=False) + if preserve_dtype: + out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True) + else: + out = npvalues + return out + + @final + def _aggregate_series_pure_python( + self, obj: Series, func: Callable + ) -> npt.NDArray[np.object_]: + _, _, ngroups = self.group_info + + result = np.empty(ngroups, dtype="O") + initialized = False + + splitter = self._get_splitter(obj, axis=0) + + for i, group in enumerate(splitter): + res = func(group) + res = extract_result(res) + + if not initialized: + # We only do this validation on the first iteration + check_result_array(res, group.dtype) + initialized = True + + result[i] = res + + return result + + @final + def apply_groupwise( + self, f: Callable, data: DataFrame | Series, axis: AxisInt = 0 + ) -> tuple[list, bool]: + mutated = False + splitter = self._get_splitter(data, axis=axis) + group_keys = self.group_keys_seq + result_values = [] + + # This calls DataSplitter.__iter__ + zipped = zip(group_keys, splitter) + + for key, group in zipped: + # Pinning name is needed for + # test_group_apply_once_per_group, + # test_inconsistent_return_type, test_set_group_name, + # test_group_name_available_in_inference_pass, + # test_groupby_multi_timezone + object.__setattr__(group, "name", key) + + # group might be modified + group_axes = group.axes + res = f(group) + if not mutated and not _is_indexed_like(res, group_axes, axis): + mutated = True + result_values.append(res) + # getattr pattern for __name__ is needed for functools.partial objects + if len(group_keys) == 0 and getattr(f, "__name__", None) in [ + "skew", + "sum", + "prod", + ]: + # If group_keys is empty, then no function calls have been made, + # so we will not have raised even if this is an invalid dtype. + # So do one dummy call here to raise appropriate TypeError. 
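# --- Illustrative sketch (editor's note, not part of the pandas source) ---
# The ``object.__setattr__(group, "name", key)`` step in apply_groupwise above
# is why the group key is available as ``.name`` inside functions passed to apply:
import pandas as pd

df = pd.DataFrame({"A": ["x", "x", "y"], "B": [1, 2, 3]})
df.groupby("A").apply(lambda g: g.name)   # Series: "x" -> "x", "y" -> "y"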
+ f(data.iloc[:0]) + + return result_values, mutated + + # ------------------------------------------------------------ + # Methods for sorting subsets of our GroupBy's object + + @final + @cache_readonly + def _sort_idx(self) -> npt.NDArray[np.intp]: + # Counting sort indexer + ids, _, ngroups = self.group_info + return get_group_index_sorter(ids, ngroups) + + @final + @cache_readonly + def _sorted_ids(self) -> npt.NDArray[np.intp]: + ids, _, _ = self.group_info + return ids.take(self._sort_idx) + + +class BinGrouper(BaseGrouper): + """ + This is an internal Grouper class + + Parameters + ---------- + bins : the split index of binlabels to group the item of axis + binlabels : the label list + indexer : np.ndarray[np.intp], optional + the indexer created by Grouper + some groupers (TimeGrouper) will sort its axis and its + group_info is also sorted, so need the indexer to reorder + + Examples + -------- + bins: [2, 4, 6, 8, 10] + binlabels: DatetimeIndex(['2005-01-01', '2005-01-03', + '2005-01-05', '2005-01-07', '2005-01-09'], + dtype='datetime64[ns]', freq='2D') + + the group_info, which contains the label of each item in grouped + axis, the index of label in label list, group number, is + + (array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5) + + means that, the grouped axis has 10 items, can be grouped into 5 + labels, the first and second items belong to the first label, the + third and forth items belong to the second label, and so on + + """ + + bins: npt.NDArray[np.int64] + binlabels: Index + + def __init__( + self, + bins, + binlabels, + indexer=None, + ) -> None: + self.bins = ensure_int64(bins) + self.binlabels = ensure_index(binlabels) + self.indexer = indexer + + # These lengths must match, otherwise we could call agg_series + # with empty self.bins, which would raise later. 
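# --- Illustrative sketch (editor's note, not part of the pandas source) ---
# A BinGrouper like the docstring example above is what resampling builds
# internally; the bins/binlabels shown there correspond roughly to:
import pandas as pd

ser = pd.Series(range(10), index=pd.date_range("2005-01-01", periods=10))
ser.resample("2D").sum()   # five 2-day bins labelled 2005-01-01 ... 2005-01-09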
+ assert len(self.binlabels) == len(self.bins) + + @cache_readonly + def groups(self): + """dict {group name -> group labels}""" + # this is mainly for compat + # GH 3881 + result = { + key: value + for key, value in zip(self.binlabels, self.bins) + if key is not NaT + } + return result + + @property + def nkeys(self) -> int: + # still matches len(self.groupings), but we can hard-code + return 1 + + @cache_readonly + def codes_info(self) -> npt.NDArray[np.intp]: + # return the codes of items in original grouped axis + ids, _, _ = self.group_info + if self.indexer is not None: + sorter = np.lexsort((ids, self.indexer)) + ids = ids[sorter] + return ids + + def get_iterator(self, data: NDFrame, axis: AxisInt = 0): + """ + Groupby iterator + + Returns + ------- + Generator yielding sequence of (name, subsetted object) + for each group + """ + if axis == 0: + slicer = lambda start, edge: data.iloc[start:edge] + else: + slicer = lambda start, edge: data.iloc[:, start:edge] + + length = len(data.axes[axis]) + + start = 0 + for edge, label in zip(self.bins, self.binlabels): + if label is not NaT: + yield label, slicer(start, edge) + start = edge + + if start < length: + yield self.binlabels[-1], slicer(start, None) + + @cache_readonly + def indices(self): + indices = collections.defaultdict(list) + + i = 0 + for label, bin in zip(self.binlabels, self.bins): + if i < bin: + if label is not NaT: + indices[label] = list(range(i, bin)) + i = bin + return indices + + @cache_readonly + def group_info(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: + ngroups = self.ngroups + obs_group_ids = np.arange(ngroups, dtype=np.intp) + rep = np.diff(np.r_[0, self.bins]) + + rep = ensure_platform_int(rep) + if ngroups == len(self.bins): + comp_ids = np.repeat(np.arange(ngroups), rep) + else: + comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep) + + return ( + ensure_platform_int(comp_ids), + obs_group_ids, + ngroups, + ) + + @cache_readonly + def reconstructed_codes(self) -> list[np.ndarray]: + # get unique result indices, and prepend 0 as groupby starts from the first + return [np.r_[0, np.flatnonzero(self.bins[1:] != self.bins[:-1]) + 1]] + + @cache_readonly + def result_index(self) -> Index: + if len(self.binlabels) != 0 and isna(self.binlabels[0]): + return self.binlabels[1:] + + return self.binlabels + + @property + def levels(self) -> list[Index]: + return [self.binlabels] + + @property + def names(self) -> list[Hashable]: + return [self.binlabels.name] + + @property + def groupings(self) -> list[grouper.Grouping]: + lev = self.binlabels + codes = self.group_info[0] + labels = lev.take(codes) + ping = grouper.Grouping( + labels, labels, in_axis=False, level=None, uniques=lev._values + ) + return [ping] + + +def _is_indexed_like(obj, axes, axis: AxisInt) -> bool: + if isinstance(obj, Series): + if len(axes) > 1: + return False + return obj.axes[axis].equals(axes[axis]) + elif isinstance(obj, DataFrame): + return obj.axes[axis].equals(axes[axis]) + + return False + + +# ---------------------------------------------------------------------- +# Splitting / application + + +class DataSplitter(Generic[NDFrameT]): + def __init__( + self, + data: NDFrameT, + labels: npt.NDArray[np.intp], + ngroups: int, + *, + sort_idx: npt.NDArray[np.intp], + sorted_ids: npt.NDArray[np.intp], + axis: AxisInt = 0, + ) -> None: + self.data = data + self.labels = ensure_platform_int(labels) # _should_ already be np.intp + self.ngroups = ngroups + + self._slabels = sorted_ids + self._sort_idx = sort_idx + + 
self.axis = axis + assert isinstance(axis, int), axis + + def __iter__(self) -> Iterator: + sdata = self._sorted_data + + if self.ngroups == 0: + # we are inside a generator, rather than raise StopIteration + # we merely return signal the end + return + + starts, ends = lib.generate_slices(self._slabels, self.ngroups) + + for start, end in zip(starts, ends): + yield self._chop(sdata, slice(start, end)) + + @cache_readonly + def _sorted_data(self) -> NDFrameT: + return self.data.take(self._sort_idx, axis=self.axis) + + def _chop(self, sdata, slice_obj: slice) -> NDFrame: + raise AbstractMethodError(self) + + +class SeriesSplitter(DataSplitter): + def _chop(self, sdata: Series, slice_obj: slice) -> Series: + # fastpath equivalent to `sdata.iloc[slice_obj]` + mgr = sdata._mgr.get_slice(slice_obj) + ser = sdata._constructor_from_mgr(mgr, axes=mgr.axes) + ser._name = sdata.name + return ser.__finalize__(sdata, method="groupby") + + +class FrameSplitter(DataSplitter): + def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame: + # Fastpath equivalent to: + # if self.axis == 0: + # return sdata.iloc[slice_obj] + # else: + # return sdata.iloc[:, slice_obj] + mgr = sdata._mgr.get_slice(slice_obj, axis=1 - self.axis) + df = sdata._constructor_from_mgr(mgr, axes=mgr.axes) + return df.__finalize__(sdata, method="groupby") + + +def _get_splitter( + data: NDFrame, + labels: npt.NDArray[np.intp], + ngroups: int, + *, + sort_idx: npt.NDArray[np.intp], + sorted_ids: npt.NDArray[np.intp], + axis: AxisInt = 0, +) -> DataSplitter: + if isinstance(data, Series): + klass: type[DataSplitter] = SeriesSplitter + else: + # i.e. DataFrame + klass = FrameSplitter + + return klass( + data, labels, ngroups, sort_idx=sort_idx, sorted_ids=sorted_ids, axis=axis + ) diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de6b5384fbf48f855a404a3fcd77327606edcb8e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b178a72b602f315025880608937ddfc3a0cf5ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17805c74639206eee1846a375672e073dcd1b3a2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/methods/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/methods/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/__init__.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..bbb9292591c98d96108f59fdeeaccd665eb94f41 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/describe.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/describe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..672d85f8a5019062d8d291310dbc886a33dc8ac3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/describe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/selectn.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/selectn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da7c36d98196f073dbfb350654b1d230c2228c1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/selectn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/to_dict.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/to_dict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6e50219ea8ede4207043e19caed8460e0373650 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/methods/__pycache__/to_dict.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/methods/describe.py b/venv/lib/python3.10/site-packages/pandas/core/methods/describe.py new file mode 100644 index 0000000000000000000000000000000000000000..c620bb9d17976a9deeb682ff323476c484f283ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/methods/describe.py @@ -0,0 +1,416 @@ +""" +Module responsible for execution of NDFrame.describe() method. + +Method NDFrame.describe() delegates actual execution to function describe_ndframe(). +""" +from __future__ import annotations + +from abc import ( + ABC, + abstractmethod, +) +from typing import ( + TYPE_CHECKING, + Callable, + cast, +) + +import numpy as np + +from pandas._libs.tslibs import Timestamp +from pandas._typing import ( + DtypeObj, + NDFrameT, + npt, +) +from pandas.util._validators import validate_percentile + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + DatetimeTZDtype, + ExtensionDtype, +) + +from pandas.core.arrays.floating import Float64Dtype +from pandas.core.reshape.concat import concat + +from pandas.io.formats.format import format_percentiles + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + + from pandas import ( + DataFrame, + Series, + ) + + +def describe_ndframe( + *, + obj: NDFrameT, + include: str | Sequence[str] | None, + exclude: str | Sequence[str] | None, + percentiles: Sequence[float] | np.ndarray | None, +) -> NDFrameT: + """Describe series or dataframe. + + Called from pandas.core.generic.NDFrame.describe() + + Parameters + ---------- + obj: DataFrame or Series + Either dataframe or series to be described. + include : 'all', list-like of dtypes or None (default), optional + A white list of data types to include in the result. Ignored for ``Series``. + exclude : list-like of dtypes or None (default), optional, + A black list of data types to omit from the result. Ignored for ``Series``. 
+ percentiles : list-like of numbers, optional + The percentiles to include in the output. All should fall between 0 and 1. + The default is ``[.25, .5, .75]``, which returns the 25th, 50th, and + 75th percentiles. + + Returns + ------- + Dataframe or series description. + """ + percentiles = _refine_percentiles(percentiles) + + describer: NDFrameDescriberAbstract + if obj.ndim == 1: + describer = SeriesDescriber( + obj=cast("Series", obj), + ) + else: + describer = DataFrameDescriber( + obj=cast("DataFrame", obj), + include=include, + exclude=exclude, + ) + + result = describer.describe(percentiles=percentiles) + return cast(NDFrameT, result) + + +class NDFrameDescriberAbstract(ABC): + """Abstract class for describing dataframe or series. + + Parameters + ---------- + obj : Series or DataFrame + Object to be described. + """ + + def __init__(self, obj: DataFrame | Series) -> None: + self.obj = obj + + @abstractmethod + def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series: + """Do describe either series or dataframe. + + Parameters + ---------- + percentiles : list-like of numbers + The percentiles to include in the output. + """ + + +class SeriesDescriber(NDFrameDescriberAbstract): + """Class responsible for creating series description.""" + + obj: Series + + def describe(self, percentiles: Sequence[float] | np.ndarray) -> Series: + describe_func = select_describe_func( + self.obj, + ) + return describe_func(self.obj, percentiles) + + +class DataFrameDescriber(NDFrameDescriberAbstract): + """Class responsible for creating dataobj description. + + Parameters + ---------- + obj : DataFrame + DataFrame to be described. + include : 'all', list-like of dtypes or None + A white list of data types to include in the result. + exclude : list-like of dtypes or None + A black list of data types to omit from the result. 
+ """ + + obj: DataFrame + + def __init__( + self, + obj: DataFrame, + *, + include: str | Sequence[str] | None, + exclude: str | Sequence[str] | None, + ) -> None: + self.include = include + self.exclude = exclude + + if obj.ndim == 2 and obj.columns.size == 0: + raise ValueError("Cannot describe a DataFrame without columns") + + super().__init__(obj) + + def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame: + data = self._select_data() + + ldesc: list[Series] = [] + for _, series in data.items(): + describe_func = select_describe_func(series) + ldesc.append(describe_func(series, percentiles)) + + col_names = reorder_columns(ldesc) + d = concat( + [x.reindex(col_names, copy=False) for x in ldesc], + axis=1, + sort=False, + ) + d.columns = data.columns.copy() + return d + + def _select_data(self) -> DataFrame: + """Select columns to be described.""" + if (self.include is None) and (self.exclude is None): + # when some numerics are found, keep only numerics + default_include: list[npt.DTypeLike] = [np.number, "datetime"] + data = self.obj.select_dtypes(include=default_include) + if len(data.columns) == 0: + data = self.obj + elif self.include == "all": + if self.exclude is not None: + msg = "exclude must be None when include is 'all'" + raise ValueError(msg) + data = self.obj + else: + data = self.obj.select_dtypes( + include=self.include, + exclude=self.exclude, + ) + return data + + +def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]: + """Set a convenient order for rows for display.""" + names: list[Hashable] = [] + seen_names: set[Hashable] = set() + ldesc_indexes = sorted((x.index for x in ldesc), key=len) + for idxnames in ldesc_indexes: + for name in idxnames: + if name not in seen_names: + seen_names.add(name) + names.append(name) + return names + + +def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series: + """Describe series containing numerical data. + + Parameters + ---------- + series : Series + Series to be described. + percentiles : list-like of numbers + The percentiles to include in the output. + """ + from pandas import Series + + formatted_percentiles = format_percentiles(percentiles) + + stat_index = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"] + d = ( + [series.count(), series.mean(), series.std(), series.min()] + + series.quantile(percentiles).tolist() + + [series.max()] + ) + # GH#48340 - always return float on non-complex numeric data + dtype: DtypeObj | None + if isinstance(series.dtype, ExtensionDtype): + if isinstance(series.dtype, ArrowDtype): + if series.dtype.kind == "m": + # GH53001: describe timedeltas with object dtype + dtype = None + else: + import pyarrow as pa + + dtype = ArrowDtype(pa.float64()) + else: + dtype = Float64Dtype() + elif series.dtype.kind in "iufb": + # i.e. numeric but exclude complex dtype + dtype = np.dtype("float") + else: + dtype = None + return Series(d, index=stat_index, name=series.name, dtype=dtype) + + +def describe_categorical_1d( + data: Series, + percentiles_ignored: Sequence[float], +) -> Series: + """Describe series containing categorical data. + + Parameters + ---------- + data : Series + Series to be described. + percentiles_ignored : list-like of numbers + Ignored, but in place to unify interface. 
+ """ + names = ["count", "unique", "top", "freq"] + objcounts = data.value_counts() + count_unique = len(objcounts[objcounts != 0]) + if count_unique > 0: + top, freq = objcounts.index[0], objcounts.iloc[0] + dtype = None + else: + # If the DataFrame is empty, set 'top' and 'freq' to None + # to maintain output shape consistency + top, freq = np.nan, np.nan + dtype = "object" + + result = [data.count(), count_unique, top, freq] + + from pandas import Series + + return Series(result, index=names, name=data.name, dtype=dtype) + + +def describe_timestamp_as_categorical_1d( + data: Series, + percentiles_ignored: Sequence[float], +) -> Series: + """Describe series containing timestamp data treated as categorical. + + Parameters + ---------- + data : Series + Series to be described. + percentiles_ignored : list-like of numbers + Ignored, but in place to unify interface. + """ + names = ["count", "unique"] + objcounts = data.value_counts() + count_unique = len(objcounts[objcounts != 0]) + result: list[float | Timestamp] = [data.count(), count_unique] + dtype = None + if count_unique > 0: + top, freq = objcounts.index[0], objcounts.iloc[0] + tz = data.dt.tz + asint = data.dropna().values.view("i8") + top = Timestamp(top) + if top.tzinfo is not None and tz is not None: + # Don't tz_localize(None) if key is already tz-aware + top = top.tz_convert(tz) + else: + top = top.tz_localize(tz) + names += ["top", "freq", "first", "last"] + result += [ + top, + freq, + Timestamp(asint.min(), tz=tz), + Timestamp(asint.max(), tz=tz), + ] + + # If the DataFrame is empty, set 'top' and 'freq' to None + # to maintain output shape consistency + else: + names += ["top", "freq"] + result += [np.nan, np.nan] + dtype = "object" + + from pandas import Series + + return Series(result, index=names, name=data.name, dtype=dtype) + + +def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series: + """Describe series containing datetime64 dtype. + + Parameters + ---------- + data : Series + Series to be described. + percentiles : list-like of numbers + The percentiles to include in the output. + """ + # GH-30164 + from pandas import Series + + formatted_percentiles = format_percentiles(percentiles) + + stat_index = ["count", "mean", "min"] + formatted_percentiles + ["max"] + d = ( + [data.count(), data.mean(), data.min()] + + data.quantile(percentiles).tolist() + + [data.max()] + ) + return Series(d, index=stat_index, name=data.name) + + +def select_describe_func( + data: Series, +) -> Callable: + """Select proper function for describing series based on data type. + + Parameters + ---------- + data : Series + Series to be described. + """ + if is_bool_dtype(data.dtype): + return describe_categorical_1d + elif is_numeric_dtype(data): + return describe_numeric_1d + elif data.dtype.kind == "M" or isinstance(data.dtype, DatetimeTZDtype): + return describe_timestamp_1d + elif data.dtype.kind == "m": + return describe_numeric_1d + else: + return describe_categorical_1d + + +def _refine_percentiles( + percentiles: Sequence[float] | np.ndarray | None, +) -> npt.NDArray[np.float64]: + """ + Ensure that percentiles are unique and sorted. + + Parameters + ---------- + percentiles : list-like of numbers, optional + The percentiles to include in the output. 
+ """ + if percentiles is None: + return np.array([0.25, 0.5, 0.75]) + + # explicit conversion of `percentiles` to list + percentiles = list(percentiles) + + # get them all to be in [0, 1] + validate_percentile(percentiles) + + # median should always be included + if 0.5 not in percentiles: + percentiles.append(0.5) + + percentiles = np.asarray(percentiles) + + # sort and check for duplicates + unique_pcts = np.unique(percentiles) + assert percentiles is not None + if len(unique_pcts) < len(percentiles): + raise ValueError("percentiles cannot contain duplicates") + + return unique_pcts diff --git a/venv/lib/python3.10/site-packages/pandas/core/methods/selectn.py b/venv/lib/python3.10/site-packages/pandas/core/methods/selectn.py new file mode 100644 index 0000000000000000000000000000000000000000..a2f8ca94134b897bd3f7824a6cb45ca13516e5ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/methods/selectn.py @@ -0,0 +1,269 @@ +""" +Implementation of nlargest and nsmallest. +""" + +from __future__ import annotations + +from collections.abc import ( + Hashable, + Sequence, +) +from typing import ( + TYPE_CHECKING, + cast, + final, +) + +import numpy as np + +from pandas._libs import algos as libalgos + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_complex_dtype, + is_integer_dtype, + is_list_like, + is_numeric_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import BaseMaskedDtype + +if TYPE_CHECKING: + from pandas._typing import ( + DtypeObj, + IndexLabel, + ) + + from pandas import ( + DataFrame, + Series, + ) + + +class SelectN: + def __init__(self, obj, n: int, keep: str) -> None: + self.obj = obj + self.n = n + self.keep = keep + + if self.keep not in ("first", "last", "all"): + raise ValueError('keep must be either "first", "last" or "all"') + + def compute(self, method: str) -> DataFrame | Series: + raise NotImplementedError + + @final + def nlargest(self): + return self.compute("nlargest") + + @final + def nsmallest(self): + return self.compute("nsmallest") + + @final + @staticmethod + def is_valid_dtype_n_method(dtype: DtypeObj) -> bool: + """ + Helper function to determine if dtype is valid for + nsmallest/nlargest methods + """ + if is_numeric_dtype(dtype): + return not is_complex_dtype(dtype) + return needs_i8_conversion(dtype) + + +class SelectNSeries(SelectN): + """ + Implement n largest/smallest for Series + + Parameters + ---------- + obj : Series + n : int + keep : {'first', 'last'}, default 'first' + + Returns + ------- + nordered : Series + """ + + def compute(self, method: str) -> Series: + from pandas.core.reshape.concat import concat + + n = self.n + dtype = self.obj.dtype + if not self.is_valid_dtype_n_method(dtype): + raise TypeError(f"Cannot use method '{method}' with dtype {dtype}") + + if n <= 0: + return self.obj[[]] + + dropped = self.obj.dropna() + nan_index = self.obj.drop(dropped.index) + + # slow method + if n >= len(self.obj): + ascending = method == "nsmallest" + return self.obj.sort_values(ascending=ascending).head(n) + + # fast method + new_dtype = dropped.dtype + + # Similar to algorithms._ensure_data + arr = dropped._values + if needs_i8_conversion(arr.dtype): + arr = arr.view("i8") + elif isinstance(arr.dtype, BaseMaskedDtype): + arr = arr._data + else: + arr = np.asarray(arr) + if arr.dtype.kind == "b": + arr = arr.view(np.uint8) + + if method == "nlargest": + arr = -arr + if is_integer_dtype(new_dtype): + # GH 21426: ensure reverse ordering at boundaries + arr -= 1 + + elif is_bool_dtype(new_dtype): + # GH 
26154: ensure False is smaller than True + arr = 1 - (-arr) + + if self.keep == "last": + arr = arr[::-1] + + nbase = n + narr = len(arr) + n = min(n, narr) + + # arr passed into kth_smallest must be contiguous. We copy + # here because kth_smallest will modify its input + # avoid OOB access with kth_smallest_c when n <= 0 + if len(arr) > 0: + kth_val = libalgos.kth_smallest(arr.copy(order="C"), n - 1) + else: + kth_val = np.nan + (ns,) = np.nonzero(arr <= kth_val) + inds = ns[arr[ns].argsort(kind="mergesort")] + + if self.keep != "all": + inds = inds[:n] + findex = nbase + else: + if len(inds) < nbase <= len(nan_index) + len(inds): + findex = len(nan_index) + len(inds) + else: + findex = len(inds) + + if self.keep == "last": + # reverse indices + inds = narr - 1 - inds + + return concat([dropped.iloc[inds], nan_index]).iloc[:findex] + + +class SelectNFrame(SelectN): + """ + Implement n largest/smallest for DataFrame + + Parameters + ---------- + obj : DataFrame + n : int + keep : {'first', 'last'}, default 'first' + columns : list or str + + Returns + ------- + nordered : DataFrame + """ + + def __init__(self, obj: DataFrame, n: int, keep: str, columns: IndexLabel) -> None: + super().__init__(obj, n, keep) + if not is_list_like(columns) or isinstance(columns, tuple): + columns = [columns] + + columns = cast(Sequence[Hashable], columns) + columns = list(columns) + self.columns = columns + + def compute(self, method: str) -> DataFrame: + from pandas.core.api import Index + + n = self.n + frame = self.obj + columns = self.columns + + for column in columns: + dtype = frame[column].dtype + if not self.is_valid_dtype_n_method(dtype): + raise TypeError( + f"Column {repr(column)} has dtype {dtype}, " + f"cannot use method {repr(method)} with this dtype" + ) + + def get_indexer(current_indexer, other_indexer): + """ + Helper function to concat `current_indexer` and `other_indexer` + depending on `method` + """ + if method == "nsmallest": + return current_indexer.append(other_indexer) + else: + return other_indexer.append(current_indexer) + + # Below we save and reset the index in case index contains duplicates + original_index = frame.index + cur_frame = frame = frame.reset_index(drop=True) + cur_n = n + indexer = Index([], dtype=np.int64) + + for i, column in enumerate(columns): + # For each column we apply method to cur_frame[column]. + # If it's the last column or if we have the number of + # results desired we are done. + # Otherwise there are duplicates of the largest/smallest + # value and we need to look at the rest of the columns + # to determine which of the rows with the largest/smallest + # value in the column to keep. + series = cur_frame[column] + is_last_column = len(columns) - 1 == i + values = getattr(series, method)( + cur_n, keep=self.keep if is_last_column else "all" + ) + + if is_last_column or len(values) <= cur_n: + indexer = get_indexer(indexer, values.index) + break + + # Now find all values which are equal to + # the (nsmallest: largest)/(nlargest: smallest) + # from our series. + border_value = values == values[values.index[-1]] + + # Some of these values are among the top-n + # some aren't. + unsafe_values = values[border_value] + + # These values are definitely among the top-n + safe_values = values[~border_value] + indexer = get_indexer(indexer, safe_values.index) + + # Go on and separate the unsafe_values on the remaining + # columns. 
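+            # Only the rows tied at the border value move on to the next
+            # column; cur_n shrinks by the rows already committed to
+            # `indexer`, so the remaining columns only have to break the tie.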
+ cur_frame = cur_frame.loc[unsafe_values.index] + cur_n = n - len(indexer) + + frame = frame.take(indexer) + + # Restore the index on frame + frame.index = original_index.take(indexer) + + # If there is only one column, the frame is already sorted. + if len(columns) == 1: + return frame + + ascending = method == "nsmallest" + + return frame.sort_values(columns, ascending=ascending, kind="mergesort") diff --git a/venv/lib/python3.10/site-packages/pandas/core/methods/to_dict.py b/venv/lib/python3.10/site-packages/pandas/core/methods/to_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..accbd92a91ed66fe4dfd1e3939fe8c498184285d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/methods/to_dict.py @@ -0,0 +1,272 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Literal, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import maybe_box_native +from pandas.core.dtypes.dtypes import ( + BaseMaskedDtype, + ExtensionDtype, +) + +from pandas.core import common as com + +if TYPE_CHECKING: + from pandas._typing import MutableMappingT + + from pandas import DataFrame + + +@overload +def to_dict( + df: DataFrame, + orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., + *, + into: type[MutableMappingT] | MutableMappingT, + index: bool = ..., +) -> MutableMappingT: + ... + + +@overload +def to_dict( + df: DataFrame, + orient: Literal["records"], + *, + into: type[MutableMappingT] | MutableMappingT, + index: bool = ..., +) -> list[MutableMappingT]: + ... + + +@overload +def to_dict( + df: DataFrame, + orient: Literal["dict", "list", "series", "split", "tight", "index"] = ..., + *, + into: type[dict] = ..., + index: bool = ..., +) -> dict: + ... + + +@overload +def to_dict( + df: DataFrame, + orient: Literal["records"], + *, + into: type[dict] = ..., + index: bool = ..., +) -> list[dict]: + ... + + +# error: Incompatible default for argument "into" (default has type "type[dict +# [Any, Any]]", argument has type "type[MutableMappingT] | MutableMappingT") +def to_dict( + df: DataFrame, + orient: Literal[ + "dict", "list", "series", "split", "tight", "records", "index" + ] = "dict", + *, + into: type[MutableMappingT] | MutableMappingT = dict, # type: ignore[assignment] + index: bool = True, +) -> MutableMappingT | list[MutableMappingT]: + """ + Convert the DataFrame to a dictionary. + + The type of the key-value pairs can be customized with the parameters + (see below). + + Parameters + ---------- + orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'} + Determines the type of the values of the dictionary. + + - 'dict' (default) : dict like {column -> {index -> value}} + - 'list' : dict like {column -> [values]} + - 'series' : dict like {column -> Series(values)} + - 'split' : dict like + {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} + - 'tight' : dict like + {'index' -> [index], 'columns' -> [columns], 'data' -> [values], + 'index_names' -> [index.names], 'column_names' -> [column.names]} + - 'records' : list like + [{column -> value}, ... , {column -> value}] + - 'index' : dict like {index -> {column -> value}} + + .. versionadded:: 1.4.0 + 'tight' as an allowed value for the ``orient`` argument + + into : class, default dict + The collections.abc.MutableMapping subclass used for all Mappings + in the return value. 
Can be the actual class or an empty + instance of the mapping type you want. If you want a + collections.defaultdict, you must pass it initialized. + + index : bool, default True + Whether to include the index item (and index_names item if `orient` + is 'tight') in the returned dictionary. Can only be ``False`` + when `orient` is 'split' or 'tight'. + + .. versionadded:: 2.0.0 + + Returns + ------- + dict, list or collections.abc.Mapping + Return a collections.abc.MutableMapping object representing the + DataFrame. The resulting transformation depends on the `orient` parameter. + """ + if not df.columns.is_unique: + warnings.warn( + "DataFrame columns are not unique, some columns will be omitted.", + UserWarning, + stacklevel=find_stack_level(), + ) + # GH16122 + into_c = com.standardize_mapping(into) + + # error: Incompatible types in assignment (expression has type "str", + # variable has type "Literal['dict', 'list', 'series', 'split', 'tight', + # 'records', 'index']") + orient = orient.lower() # type: ignore[assignment] + + if not index and orient not in ["split", "tight"]: + raise ValueError( + "'index=False' is only valid when 'orient' is 'split' or 'tight'" + ) + + if orient == "series": + # GH46470 Return quickly if orient series to avoid creating dtype objects + return into_c((k, v) for k, v in df.items()) + + box_native_indices = [ + i + for i, col_dtype in enumerate(df.dtypes.values) + if col_dtype == np.dtype(object) or isinstance(col_dtype, ExtensionDtype) + ] + box_na_values = [ + lib.no_default if not isinstance(col_dtype, BaseMaskedDtype) else libmissing.NA + for i, col_dtype in enumerate(df.dtypes.values) + ] + are_all_object_dtype_cols = len(box_native_indices) == len(df.dtypes) + + if orient == "dict": + return into_c((k, v.to_dict(into=into)) for k, v in df.items()) + + elif orient == "list": + object_dtype_indices_as_set: set[int] = set(box_native_indices) + return into_c( + ( + k, + list(map(maybe_box_native, v.to_numpy(na_value=box_na_values[i]))) + if i in object_dtype_indices_as_set + else list(map(maybe_box_native, v.to_numpy())), + ) + for i, (k, v) in enumerate(df.items()) + ) + + elif orient == "split": + data = df._create_data_for_split_and_tight_to_dict( + are_all_object_dtype_cols, box_native_indices + ) + + return into_c( + ((("index", df.index.tolist()),) if index else ()) + + ( + ("columns", df.columns.tolist()), + ("data", data), + ) + ) + + elif orient == "tight": + data = df._create_data_for_split_and_tight_to_dict( + are_all_object_dtype_cols, box_native_indices + ) + + return into_c( + ((("index", df.index.tolist()),) if index else ()) + + ( + ("columns", df.columns.tolist()), + ( + "data", + [ + list(map(maybe_box_native, t)) + for t in df.itertuples(index=False, name=None) + ], + ), + ) + + ((("index_names", list(df.index.names)),) if index else ()) + + (("column_names", list(df.columns.names)),) + ) + + elif orient == "records": + columns = df.columns.tolist() + if are_all_object_dtype_cols: + rows = ( + dict(zip(columns, row)) for row in df.itertuples(index=False, name=None) + ) + return [ + into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows + ] + else: + data = [ + into_c(zip(columns, t)) for t in df.itertuples(index=False, name=None) + ] + if box_native_indices: + object_dtype_indices_as_set = set(box_native_indices) + object_dtype_cols = { + col + for i, col in enumerate(df.columns) + if i in object_dtype_indices_as_set + } + for row in data: + for col in object_dtype_cols: + row[col] = maybe_box_native(row[col]) + 
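+            # Only the object/extension-dtype columns required Python-level
+            # boxing above; values from the remaining columns are returned as
+            # produced by itertuples.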
return data + + elif orient == "index": + if not df.index.is_unique: + raise ValueError("DataFrame index must be unique for orient='index'.") + columns = df.columns.tolist() + if are_all_object_dtype_cols: + return into_c( + (t[0], dict(zip(df.columns, map(maybe_box_native, t[1:])))) + for t in df.itertuples(name=None) + ) + elif box_native_indices: + object_dtype_indices_as_set = set(box_native_indices) + is_object_dtype_by_index = [ + i in object_dtype_indices_as_set for i in range(len(df.columns)) + ] + return into_c( + ( + t[0], + { + columns[i]: maybe_box_native(v) + if is_object_dtype_by_index[i] + else v + for i, v in enumerate(t[1:]) + }, + ) + for t in df.itertuples(name=None) + ) + else: + return into_c( + (t[0], dict(zip(df.columns, t[1:]))) for t in df.itertuples(name=None) + ) + + else: + raise ValueError(f"orient '{orient}' not understood") diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/__init__.py b/venv/lib/python3.10/site-packages/pandas/core/window/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..857e12e5467a6a7d2263d9add33e65b9499778fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/window/__init__.py @@ -0,0 +1,23 @@ +from pandas.core.window.ewm import ( + ExponentialMovingWindow, + ExponentialMovingWindowGroupby, +) +from pandas.core.window.expanding import ( + Expanding, + ExpandingGroupby, +) +from pandas.core.window.rolling import ( + Rolling, + RollingGroupby, + Window, +) + +__all__ = [ + "Expanding", + "ExpandingGroupby", + "ExponentialMovingWindow", + "ExponentialMovingWindowGroupby", + "Rolling", + "RollingGroupby", + "Window", +] diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bab17e7b93d8b835124ba48a2a55d14c86acb1ca Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5fb62ea301728974ee815703cefbcc18b00ba02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/doc.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/doc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69455f9b38a84883b8368f12618defba513252f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/doc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/ewm.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/ewm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca1e2fbefad494ea756cf5c82896807a716dc2e8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/ewm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/expanding.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/expanding.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..ba0bebbf8a293ee3f887b0a38e4c233b99dbecfe Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/expanding.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/numba_.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/numba_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04e8d5bc0593c3d5d54c7104bc4590e0d479d59c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/numba_.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/online.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/online.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb21df3d05f21eb4f8fd9b1434fa111804954d80 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/online.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/rolling.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/rolling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..144586b25a31ded0f8ca108e1b2c348f42341626 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/core/window/__pycache__/rolling.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/common.py b/venv/lib/python3.10/site-packages/pandas/core/window/common.py new file mode 100644 index 0000000000000000000000000000000000000000..fc8eddca09c84029b9ac92b658a47b45e4764414 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/window/common.py @@ -0,0 +1,169 @@ +"""Common utility functions for rolling operations""" +from __future__ import annotations + +from collections import defaultdict +from typing import cast + +import numpy as np + +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core.indexes.api import MultiIndex + + +def flex_binary_moment(arg1, arg2, f, pairwise: bool = False): + if isinstance(arg1, ABCSeries) and isinstance(arg2, ABCSeries): + X, Y = prep_binary(arg1, arg2) + return f(X, Y) + + elif isinstance(arg1, ABCDataFrame): + from pandas import DataFrame + + def dataframe_from_int_dict(data, frame_template) -> DataFrame: + result = DataFrame(data, index=frame_template.index) + if len(result.columns) > 0: + result.columns = frame_template.columns[result.columns] + else: + result.columns = frame_template.columns.copy() + return result + + results = {} + if isinstance(arg2, ABCDataFrame): + if pairwise is False: + if arg1 is arg2: + # special case in order to handle duplicate column names + for i in range(len(arg1.columns)): + results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) + return dataframe_from_int_dict(results, arg1) + else: + if not arg1.columns.is_unique: + raise ValueError("'arg1' columns are not unique") + if not arg2.columns.is_unique: + raise ValueError("'arg2' columns are not unique") + X, Y = arg1.align(arg2, join="outer") + X, Y = prep_binary(X, Y) + res_columns = arg1.columns.union(arg2.columns) + for col in res_columns: + if col in X and col in Y: + results[col] = f(X[col], Y[col]) + return DataFrame(results, index=X.index, columns=res_columns) + elif pairwise is True: + results = defaultdict(dict) + for i in range(len(arg1.columns)): + for j in 
range(len(arg2.columns)): + if j < i and arg2 is arg1: + # Symmetric case + results[i][j] = results[j][i] + else: + results[i][j] = f( + *prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]) + ) + + from pandas import concat + + result_index = arg1.index.union(arg2.index) + if len(result_index): + # construct result frame + result = concat( + [ + concat( + [results[i][j] for j in range(len(arg2.columns))], + ignore_index=True, + ) + for i in range(len(arg1.columns)) + ], + ignore_index=True, + axis=1, + ) + result.columns = arg1.columns + + # set the index and reorder + if arg2.columns.nlevels > 1: + # mypy needs to know columns is a MultiIndex, Index doesn't + # have levels attribute + arg2.columns = cast(MultiIndex, arg2.columns) + # GH 21157: Equivalent to MultiIndex.from_product( + # [result_index], , + # ) + # A normal MultiIndex.from_product will produce too many + # combinations. + result_level = np.tile( + result_index, len(result) // len(result_index) + ) + arg2_levels = ( + np.repeat( + arg2.columns.get_level_values(i), + len(result) // len(arg2.columns), + ) + for i in range(arg2.columns.nlevels) + ) + result_names = list(arg2.columns.names) + [result_index.name] + result.index = MultiIndex.from_arrays( + [*arg2_levels, result_level], names=result_names + ) + # GH 34440 + num_levels = len(result.index.levels) + new_order = [num_levels - 1] + list(range(num_levels - 1)) + result = result.reorder_levels(new_order).sort_index() + else: + result.index = MultiIndex.from_product( + [range(len(arg2.columns)), range(len(result_index))] + ) + result = result.swaplevel(1, 0).sort_index() + result.index = MultiIndex.from_product( + [result_index] + [arg2.columns] + ) + else: + # empty result + result = DataFrame( + index=MultiIndex( + levels=[arg1.index, arg2.columns], codes=[[], []] + ), + columns=arg2.columns, + dtype="float64", + ) + + # reset our index names to arg1 names + # reset our column names to arg2 names + # careful not to mutate the original names + result.columns = result.columns.set_names(arg1.columns.names) + result.index = result.index.set_names( + result_index.names + arg2.columns.names + ) + + return result + else: + results = { + i: f(*prep_binary(arg1.iloc[:, i], arg2)) + for i in range(len(arg1.columns)) + } + return dataframe_from_int_dict(results, arg1) + + else: + return flex_binary_moment(arg2, arg1, f) + + +def zsqrt(x): + with np.errstate(all="ignore"): + result = np.sqrt(x) + mask = x < 0 + + if isinstance(x, ABCDataFrame): + if mask._values.any(): + result[mask] = 0 + else: + if mask.any(): + result[mask] = 0 + + return result + + +def prep_binary(arg1, arg2): + # mask out values, this also makes a common index... 
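+    # Adding ``0 * other`` aligns both operands on the union of their indexes
+    # and injects NaN wherever the other side is missing, so the binary
+    # moment only sees jointly observed points.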
+ X = arg1 + 0 * arg2 + Y = arg2 + 0 * arg1 + + return X, Y diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/doc.py b/venv/lib/python3.10/site-packages/pandas/core/window/doc.py new file mode 100644 index 0000000000000000000000000000000000000000..2a5cbc04921fadacf18a89608f2c0665bd8177e2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/window/doc.py @@ -0,0 +1,116 @@ +"""Any shareable docstring components for rolling/expanding/ewm""" +from __future__ import annotations + +from textwrap import dedent + +from pandas.core.shared_docs import _shared_docs + +_shared_docs = dict(**_shared_docs) + + +def create_section_header(header: str) -> str: + """Create numpydoc section header""" + return f"{header}\n{'-' * len(header)}\n" + + +template_header = "\nCalculate the {window_method} {aggregation_description}.\n\n" + +template_returns = dedent( + """ + Series or DataFrame + Return type is the same as the original object with ``np.float64`` dtype.\n + """ +).replace("\n", "", 1) + +template_see_also = dedent( + """ + pandas.Series.{window_method} : Calling {window_method} with Series data. + pandas.DataFrame.{window_method} : Calling {window_method} with DataFrames. + pandas.Series.{agg_method} : Aggregating {agg_method} for Series. + pandas.DataFrame.{agg_method} : Aggregating {agg_method} for DataFrame.\n + """ +).replace("\n", "", 1) + +kwargs_numeric_only = dedent( + """ + numeric_only : bool, default False + Include only float, int, boolean columns. + + .. versionadded:: 1.5.0\n + """ +).replace("\n", "", 1) + +kwargs_scipy = dedent( + """ + **kwargs + Keyword arguments to configure the ``SciPy`` weighted window type.\n + """ +).replace("\n", "", 1) + +window_apply_parameters = dedent( + """ + func : function + Must produce a single value from an ndarray input if ``raw=True`` + or a single value from a Series if ``raw=False``. Can also accept a + Numba JIT function with ``engine='numba'`` specified. + + raw : bool, default False + * ``False`` : passes each row or column as a Series to the + function. + * ``True`` : the passed function will receive ndarray + objects instead. + If you are just applying a NumPy reduction function this will + achieve much better performance. + + engine : str, default None + * ``'cython'`` : Runs rolling apply through C-extensions from cython. + * ``'numba'`` : Runs rolling apply through JIT compiled code from numba. + Only available when ``raw`` is set to ``True``. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to both the ``func`` and the ``apply`` rolling aggregation. + + args : tuple, default None + Positional arguments to be passed into func. 
+ + kwargs : dict, default None + Keyword arguments to be passed into func.\n + """ +).replace("\n", "", 1) + +numba_notes = ( + "See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for " + "extended documentation and performance considerations for the Numba engine.\n\n" +) + + +def window_agg_numba_parameters(version: str = "1.3") -> str: + return ( + dedent( + """ + engine : str, default None + * ``'cython'`` : Runs the operation through C-extensions from cython. + * ``'numba'`` : Runs the operation through JIT compiled code from numba. + * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba`` + + .. versionadded:: {version}.0 + + engine_kwargs : dict, default None + * For ``'cython'`` engine, there are no accepted ``engine_kwargs`` + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` + + .. versionadded:: {version}.0\n + """ + ) + .replace("\n", "", 1) + .replace("{version}", version) + ) diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/ewm.py b/venv/lib/python3.10/site-packages/pandas/core/window/ewm.py new file mode 100644 index 0000000000000000000000000000000000000000..9ebf32d3e536eee7233b911acb880f3d40521793 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/window/ewm.py @@ -0,0 +1,1095 @@ +from __future__ import annotations + +import datetime +from functools import partial +from textwrap import dedent +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.tslibs import Timedelta +import pandas._libs.window.aggregations as window_aggregations +from pandas.util._decorators import doc + +from pandas.core.dtypes.common import ( + is_datetime64_dtype, + is_numeric_dtype, +) +from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.missing import isna + +from pandas.core import common +from pandas.core.arrays.datetimelike import dtype_to_unit +from pandas.core.indexers.objects import ( + BaseIndexer, + ExponentialMovingWindowIndexer, + GroupbyIndexer, +) +from pandas.core.util.numba_ import ( + get_jit_arguments, + maybe_use_numba, +) +from pandas.core.window.common import zsqrt +from pandas.core.window.doc import ( + _shared_docs, + create_section_header, + kwargs_numeric_only, + numba_notes, + template_header, + template_returns, + template_see_also, + window_agg_numba_parameters, +) +from pandas.core.window.numba_ import ( + generate_numba_ewm_func, + generate_numba_ewm_table_func, +) +from pandas.core.window.online import ( + EWMMeanState, + generate_online_numba_ewma_func, +) +from pandas.core.window.rolling import ( + BaseWindow, + BaseWindowGroupby, +) + +if TYPE_CHECKING: + from pandas._typing import ( + Axis, + TimedeltaConvertibleTypes, + npt, + ) + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.generic import NDFrame + + +def get_center_of_mass( + comass: float | None, + span: float | None, + halflife: float | None, + alpha: float | None, +) -> float: + valid_count = common.count_not_none(comass, span, halflife, alpha) + if valid_count > 1: + raise ValueError("comass, span, halflife, and alpha are mutually exclusive") + + # Convert to center of mass; domain checks ensure 0 < alpha <= 1 + if comass is not None: + if comass < 0: + raise ValueError("comass must satisfy: 
comass >= 0") + elif span is not None: + if span < 1: + raise ValueError("span must satisfy: span >= 1") + comass = (span - 1) / 2 + elif halflife is not None: + if halflife <= 0: + raise ValueError("halflife must satisfy: halflife > 0") + decay = 1 - np.exp(np.log(0.5) / halflife) + comass = 1 / decay - 1 + elif alpha is not None: + if alpha <= 0 or alpha > 1: + raise ValueError("alpha must satisfy: 0 < alpha <= 1") + comass = (1 - alpha) / alpha + else: + raise ValueError("Must pass one of comass, span, halflife, or alpha") + + return float(comass) + + +def _calculate_deltas( + times: np.ndarray | NDFrame, + halflife: float | TimedeltaConvertibleTypes | None, +) -> npt.NDArray[np.float64]: + """ + Return the diff of the times divided by the half-life. These values are used in + the calculation of the ewm mean. + + Parameters + ---------- + times : np.ndarray, Series + Times corresponding to the observations. Must be monotonically increasing + and ``datetime64[ns]`` dtype. + halflife : float, str, timedelta, optional + Half-life specifying the decay + + Returns + ------- + np.ndarray + Diff of the times divided by the half-life + """ + unit = dtype_to_unit(times.dtype) + if isinstance(times, ABCSeries): + times = times._values + _times = np.asarray(times.view(np.int64), dtype=np.float64) + _halflife = float(Timedelta(halflife).as_unit(unit)._value) + return np.diff(_times) / _halflife + + +class ExponentialMovingWindow(BaseWindow): + r""" + Provide exponentially weighted (EW) calculations. + + Exactly one of ``com``, ``span``, ``halflife``, or ``alpha`` must be + provided if ``times`` is not provided. If ``times`` is provided, + ``halflife`` and one of ``com``, ``span`` or ``alpha`` may be provided. + + Parameters + ---------- + com : float, optional + Specify decay in terms of center of mass + + :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`. + + span : float, optional + Specify decay in terms of span + + :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`. + + halflife : float, str, timedelta, optional + Specify decay in terms of half-life + + :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for + :math:`halflife > 0`. + + If ``times`` is specified, a timedelta convertible unit over which an + observation decays to half its value. Only applicable to ``mean()``, + and halflife value will not apply to the other functions. + + alpha : float, optional + Specify smoothing factor :math:`\alpha` directly + + :math:`0 < \alpha \leq 1`. + + min_periods : int, default 0 + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. + + adjust : bool, default True + Divide by decaying adjustment factor in beginning periods to account + for imbalance in relative weightings (viewing EWMA as a moving average). + + - When ``adjust=True`` (default), the EW function is calculated using weights + :math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series + [:math:`x_0, x_1, ..., x_t`] would be: + + .. math:: + y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 - + \alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t} + + - When ``adjust=False``, the exponentially weighted function is calculated + recursively: + + .. math:: + \begin{split} + y_0 &= x_0\\ + y_t &= (1 - \alpha) y_{t-1} + \alpha x_t, + \end{split} + ignore_na : bool, default False + Ignore missing values when calculating weights. + + - When ``ignore_na=False`` (default), weights are based on absolute positions. 
+ For example, the weights of :math:`x_0` and :math:`x_2` used in calculating + the final weighted average of [:math:`x_0`, None, :math:`x_2`] are + :math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and + :math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``. + + - When ``ignore_na=True``, weights are based + on relative positions. For example, the weights of :math:`x_0` and :math:`x_2` + used in calculating the final weighted average of + [:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if + ``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``. + + axis : {0, 1}, default 0 + If ``0`` or ``'index'``, calculate across the rows. + + If ``1`` or ``'columns'``, calculate across the columns. + + For `Series` this parameter is unused and defaults to 0. + + times : np.ndarray, Series, default None + + Only applicable to ``mean()``. + + Times corresponding to the observations. Must be monotonically increasing and + ``datetime64[ns]`` dtype. + + If 1-D array like, a sequence with the same shape as the observations. + + method : str {'single', 'table'}, default 'single' + .. versionadded:: 1.4.0 + + Execute the rolling operation per single column or row (``'single'``) + or over the entire object (``'table'``). + + This argument is only implemented when specifying ``engine='numba'`` + in the method call. + + Only applicable to ``mean()`` + + Returns + ------- + pandas.api.typing.ExponentialMovingWindow + + See Also + -------- + rolling : Provides rolling window calculations. + expanding : Provides expanding transformations. + + Notes + ----- + See :ref:`Windowing Operations ` + for further usage details and examples. + + Examples + -------- + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + >>> df.ewm(com=0.5).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + >>> df.ewm(alpha=2 / 3).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + + **adjust** + + >>> df.ewm(com=0.5, adjust=True).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + >>> df.ewm(com=0.5, adjust=False).mean() + B + 0 0.000000 + 1 0.666667 + 2 1.555556 + 3 1.555556 + 4 3.650794 + + **ignore_na** + + >>> df.ewm(com=0.5, ignore_na=True).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.225000 + >>> df.ewm(com=0.5, ignore_na=False).mean() + B + 0 0.000000 + 1 0.750000 + 2 1.615385 + 3 1.615385 + 4 3.670213 + + **times** + + Exponentially weighted mean with weights calculated with a timedelta ``halflife`` + relative to ``times``. 
+ + >>> times = ['2020-01-01', '2020-01-03', '2020-01-10', '2020-01-15', '2020-01-17'] + >>> df.ewm(halflife='4 days', times=pd.DatetimeIndex(times)).mean() + B + 0 0.000000 + 1 0.585786 + 2 1.523889 + 3 1.523889 + 4 3.233686 + """ + + _attributes = [ + "com", + "span", + "halflife", + "alpha", + "min_periods", + "adjust", + "ignore_na", + "axis", + "times", + "method", + ] + + def __init__( + self, + obj: NDFrame, + com: float | None = None, + span: float | None = None, + halflife: float | TimedeltaConvertibleTypes | None = None, + alpha: float | None = None, + min_periods: int | None = 0, + adjust: bool = True, + ignore_na: bool = False, + axis: Axis = 0, + times: np.ndarray | NDFrame | None = None, + method: str = "single", + *, + selection=None, + ) -> None: + super().__init__( + obj=obj, + min_periods=1 if min_periods is None else max(int(min_periods), 1), + on=None, + center=False, + closed=None, + method=method, + axis=axis, + selection=selection, + ) + self.com = com + self.span = span + self.halflife = halflife + self.alpha = alpha + self.adjust = adjust + self.ignore_na = ignore_na + self.times = times + if self.times is not None: + if not self.adjust: + raise NotImplementedError("times is not supported with adjust=False.") + times_dtype = getattr(self.times, "dtype", None) + if not ( + is_datetime64_dtype(times_dtype) + or isinstance(times_dtype, DatetimeTZDtype) + ): + raise ValueError("times must be datetime64 dtype.") + if len(self.times) != len(obj): + raise ValueError("times must be the same length as the object.") + if not isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)): + raise ValueError("halflife must be a timedelta convertible object") + if isna(self.times).any(): + raise ValueError("Cannot convert NaT values to integer") + self._deltas = _calculate_deltas(self.times, self.halflife) + # Halflife is no longer applicable when calculating COM + # But allow COM to still be calculated if the user passes other decay args + if common.count_not_none(self.com, self.span, self.alpha) > 0: + self._com = get_center_of_mass(self.com, self.span, None, self.alpha) + else: + self._com = 1.0 + else: + if self.halflife is not None and isinstance( + self.halflife, (str, datetime.timedelta, np.timedelta64) + ): + raise ValueError( + "halflife can only be a timedelta convertible argument if " + "times is not None." + ) + # Without times, points are equally spaced + self._deltas = np.ones( + max(self.obj.shape[self.axis] - 1, 0), dtype=np.float64 + ) + self._com = get_center_of_mass( + # error: Argument 3 to "get_center_of_mass" has incompatible type + # "Union[float, Any, None, timedelta64, signedinteger[_64Bit]]"; + # expected "Optional[float]" + self.com, + self.span, + self.halflife, # type: ignore[arg-type] + self.alpha, + ) + + def _check_window_bounds( + self, start: np.ndarray, end: np.ndarray, num_vals: int + ) -> None: + # emw algorithms are iterative with each point + # ExponentialMovingWindowIndexer "bounds" are the entire window + pass + + def _get_window_indexer(self) -> BaseIndexer: + """ + Return an indexer class that will compute the window start and end bounds + """ + return ExponentialMovingWindowIndexer() + + def online( + self, engine: str = "numba", engine_kwargs=None + ) -> OnlineExponentialMovingWindow: + """ + Return an ``OnlineExponentialMovingWindow`` object to calculate + exponentially moving window aggregations in an online method. + + .. 
versionadded:: 1.3.0 + + Parameters + ---------- + engine: str, default ``'numba'`` + Execution engine to calculate online aggregations. + Applies to all supported aggregation methods. + + engine_kwargs : dict, default None + Applies to all supported aggregation methods. + + * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil`` + and ``parallel`` dictionary keys. The values must either be ``True`` or + ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is + ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be + applied to the function + + Returns + ------- + OnlineExponentialMovingWindow + """ + return OnlineExponentialMovingWindow( + obj=self.obj, + com=self.com, + span=self.span, + halflife=self.halflife, + alpha=self.alpha, + min_periods=self.min_periods, + adjust=self.adjust, + ignore_na=self.ignore_na, + axis=self.axis, + times=self.times, + engine=engine, + engine_kwargs=engine_kwargs, + selection=self._selection, + ) + + @doc( + _shared_docs["aggregate"], + see_also=dedent( + """ + See Also + -------- + pandas.DataFrame.rolling.aggregate + """ + ), + examples=dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + >>> df + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 + + >>> df.ewm(alpha=0.5).mean() + A B C + 0 1.000000 4.000000 7.000000 + 1 1.666667 4.666667 7.666667 + 2 2.428571 5.428571 8.428571 + """ + ), + klass="Series/Dataframe", + axis="", + ) + def aggregate(self, func, *args, **kwargs): + return super().aggregate(func, *args, **kwargs) + + agg = aggregate + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.ewm(alpha=.2).mean() + 0 1.000000 + 1 1.555556 + 2 2.147541 + 3 2.775068 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) mean", + agg_method="mean", + ) + def mean( + self, + numeric_only: bool = False, + engine=None, + engine_kwargs=None, + ): + if maybe_use_numba(engine): + if self.method == "single": + func = generate_numba_ewm_func + else: + func = generate_numba_ewm_table_func + ewm_func = func( + **get_jit_arguments(engine_kwargs), + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + deltas=tuple(self._deltas), + normalize=True, + ) + return self._apply(ewm_func, name="mean") + elif engine in ("cython", None): + if engine_kwargs is not None: + raise ValueError("cython engine does not accept engine_kwargs") + + deltas = None if self.times is None else self._deltas + window_func = partial( + window_aggregations.ewm, + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + deltas=deltas, + normalize=True, + ) + return self._apply(window_func, name="mean", numeric_only=numeric_only) + else: + raise ValueError("engine must be either 'numba' or 'cython'") + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.ewm(alpha=.2).sum() + 
0 1.000 + 1 2.800 + 2 5.240 + 3 8.192 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) sum", + agg_method="sum", + ) + def sum( + self, + numeric_only: bool = False, + engine=None, + engine_kwargs=None, + ): + if not self.adjust: + raise NotImplementedError("sum is not implemented with adjust=False") + if maybe_use_numba(engine): + if self.method == "single": + func = generate_numba_ewm_func + else: + func = generate_numba_ewm_table_func + ewm_func = func( + **get_jit_arguments(engine_kwargs), + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + deltas=tuple(self._deltas), + normalize=False, + ) + return self._apply(ewm_func, name="sum") + elif engine in ("cython", None): + if engine_kwargs is not None: + raise ValueError("cython engine does not accept engine_kwargs") + + deltas = None if self.times is None else self._deltas + window_func = partial( + window_aggregations.ewm, + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + deltas=deltas, + normalize=False, + ) + return self._apply(window_func, name="sum", numeric_only=numeric_only) + else: + raise ValueError("engine must be either 'numba' or 'cython'") + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """\ + bias : bool, default False + Use a standard estimation bias correction. + """ + ), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.ewm(alpha=.2).std() + 0 NaN + 1 0.707107 + 2 0.995893 + 3 1.277320 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) standard deviation", + agg_method="std", + ) + def std(self, bias: bool = False, numeric_only: bool = False): + if ( + numeric_only + and self._selected_obj.ndim == 1 + and not is_numeric_dtype(self._selected_obj.dtype) + ): + # Raise directly so error message says std instead of var + raise NotImplementedError( + f"{type(self).__name__}.std does not implement numeric_only" + ) + return zsqrt(self.var(bias=bias, numeric_only=numeric_only)) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """\ + bias : bool, default False + Use a standard estimation bias correction. + """ + ), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.ewm(alpha=.2).var() + 0 NaN + 1 0.500000 + 2 0.991803 + 3 1.631547 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) variance", + agg_method="var", + ) + def var(self, bias: bool = False, numeric_only: bool = False): + window_func = window_aggregations.ewmcov + wfunc = partial( + window_func, + com=self._com, + adjust=self.adjust, + ignore_na=self.ignore_na, + bias=bias, + ) + + def var_func(values, begin, end, min_periods): + return wfunc(values, begin, end, min_periods, values) + + return self._apply(var_func, name="var", numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """\ + other : Series or DataFrame , optional + If not supplied then will default to self and produce pairwise + output. 
+ pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndex DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + bias : bool, default False + Use a standard estimation bias correction. + """ + ), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4]) + >>> ser2 = pd.Series([10, 11, 13, 16]) + >>> ser1.ewm(alpha=.2).cov(ser2) + 0 NaN + 1 0.500000 + 2 1.524590 + 3 3.408836 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) sample covariance", + agg_method="cov", + ) + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + bias: bool = False, + numeric_only: bool = False, + ): + from pandas import Series + + self._validate_numeric_only("cov", numeric_only) + + def cov_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + start, end = window_indexer.get_window_bounds( + num_values=len(x_array), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + result = window_aggregations.ewmcov( + x_array, + start, + end, + # error: Argument 4 to "ewmcov" has incompatible type + # "Optional[int]"; expected "int" + self.min_periods, # type: ignore[arg-type] + y_array, + self._com, + self.adjust, + self.ignore_na, + bias, + ) + return Series(result, index=x.index, name=x.name, copy=False) + + return self._apply_pairwise( + self._selected_obj, other, pairwise, cov_func, numeric_only + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """\ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndex DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. 
+ """ + ), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4]) + >>> ser2 = pd.Series([10, 11, 13, 16]) + >>> ser1.ewm(alpha=.2).corr(ser2) + 0 NaN + 1 1.000000 + 2 0.982821 + 3 0.977802 + dtype: float64 + """ + ), + window_method="ewm", + aggregation_description="(exponential weighted moment) sample correlation", + agg_method="corr", + ) + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + numeric_only: bool = False, + ): + from pandas import Series + + self._validate_numeric_only("corr", numeric_only) + + def cov_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + start, end = window_indexer.get_window_bounds( + num_values=len(x_array), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + + def _cov(X, Y): + return window_aggregations.ewmcov( + X, + start, + end, + min_periods, + Y, + self._com, + self.adjust, + self.ignore_na, + True, + ) + + with np.errstate(all="ignore"): + cov = _cov(x_array, y_array) + x_var = _cov(x_array, x_array) + y_var = _cov(y_array, y_array) + result = cov / zsqrt(x_var * y_var) + return Series(result, index=x.index, name=x.name, copy=False) + + return self._apply_pairwise( + self._selected_obj, other, pairwise, cov_func, numeric_only + ) + + +class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow): + """ + Provide an exponential moving window groupby implementation. + """ + + _attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes + + def __init__(self, obj, *args, _grouper=None, **kwargs) -> None: + super().__init__(obj, *args, _grouper=_grouper, **kwargs) + + if not obj.empty and self.times is not None: + # sort the times and recalculate the deltas according to the groups + groupby_order = np.concatenate(list(self._grouper.indices.values())) + self._deltas = _calculate_deltas( + self.times.take(groupby_order), + self.halflife, + ) + + def _get_window_indexer(self) -> GroupbyIndexer: + """ + Return an indexer class that will compute the window start and end bounds + + Returns + ------- + GroupbyIndexer + """ + window_indexer = GroupbyIndexer( + groupby_indices=self._grouper.indices, + window_indexer=ExponentialMovingWindowIndexer, + ) + return window_indexer + + +class OnlineExponentialMovingWindow(ExponentialMovingWindow): + def __init__( + self, + obj: NDFrame, + com: float | None = None, + span: float | None = None, + halflife: float | TimedeltaConvertibleTypes | None = None, + alpha: float | None = None, + min_periods: int | None = 0, + adjust: bool = True, + ignore_na: bool = False, + axis: Axis = 0, + times: np.ndarray | NDFrame | None = None, + engine: str = "numba", + engine_kwargs: dict[str, bool] | None = None, + *, + selection=None, + ) -> None: + if times is not None: + raise NotImplementedError( + "times is not implemented with online operations." 
+ ) + super().__init__( + obj=obj, + com=com, + span=span, + halflife=halflife, + alpha=alpha, + min_periods=min_periods, + adjust=adjust, + ignore_na=ignore_na, + axis=axis, + times=times, + selection=selection, + ) + self._mean = EWMMeanState( + self._com, self.adjust, self.ignore_na, self.axis, obj.shape + ) + if maybe_use_numba(engine): + self.engine = engine + self.engine_kwargs = engine_kwargs + else: + raise ValueError("'numba' is the only supported engine") + + def reset(self) -> None: + """ + Reset the state captured by `update` calls. + """ + self._mean.reset() + + def aggregate(self, func, *args, **kwargs): + raise NotImplementedError("aggregate is not implemented.") + + def std(self, bias: bool = False, *args, **kwargs): + raise NotImplementedError("std is not implemented.") + + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + numeric_only: bool = False, + ): + raise NotImplementedError("corr is not implemented.") + + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + bias: bool = False, + numeric_only: bool = False, + ): + raise NotImplementedError("cov is not implemented.") + + def var(self, bias: bool = False, numeric_only: bool = False): + raise NotImplementedError("var is not implemented.") + + def mean(self, *args, update=None, update_times=None, **kwargs): + """ + Calculate an online exponentially weighted mean. + + Parameters + ---------- + update: DataFrame or Series, default None + New values to continue calculating the + exponentially weighted mean from the last values and weights. + Values should be float64 dtype. + + ``update`` needs to be ``None`` the first time the + exponentially weighted mean is calculated. + + update_times: Series or 1-D np.ndarray, default None + New times to continue calculating the + exponentially weighted mean from the last values and weights. + If ``None``, values are assumed to be evenly spaced + in time. + This feature is currently unsupported. 
+ + Returns + ------- + DataFrame or Series + + Examples + -------- + >>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)}) + >>> online_ewm = df.head(2).ewm(0.5).online() + >>> online_ewm.mean() + a b + 0 0.00 5.00 + 1 0.75 5.75 + >>> online_ewm.mean(update=df.tail(3)) + a b + 2 1.615385 6.615385 + 3 2.550000 7.550000 + 4 3.520661 8.520661 + >>> online_ewm.reset() + >>> online_ewm.mean() + a b + 0 0.00 5.00 + 1 0.75 5.75 + """ + result_kwargs = {} + is_frame = self._selected_obj.ndim == 2 + if update_times is not None: + raise NotImplementedError("update_times is not implemented.") + update_deltas = np.ones( + max(self._selected_obj.shape[self.axis - 1] - 1, 0), dtype=np.float64 + ) + if update is not None: + if self._mean.last_ewm is None: + raise ValueError( + "Must call mean with update=None first before passing update" + ) + result_from = 1 + result_kwargs["index"] = update.index + if is_frame: + last_value = self._mean.last_ewm[np.newaxis, :] + result_kwargs["columns"] = update.columns + else: + last_value = self._mean.last_ewm + result_kwargs["name"] = update.name + np_array = np.concatenate((last_value, update.to_numpy())) + else: + result_from = 0 + result_kwargs["index"] = self._selected_obj.index + if is_frame: + result_kwargs["columns"] = self._selected_obj.columns + else: + result_kwargs["name"] = self._selected_obj.name + np_array = self._selected_obj.astype(np.float64, copy=False).to_numpy() + ewma_func = generate_online_numba_ewma_func( + **get_jit_arguments(self.engine_kwargs) + ) + result = self._mean.run_ewm( + np_array if is_frame else np_array[:, np.newaxis], + update_deltas, + self.min_periods, + ewma_func, + ) + if not is_frame: + result = result.squeeze() + result = result[result_from:] + result = self._selected_obj._constructor(result, **result_kwargs) + return result diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/expanding.py b/venv/lib/python3.10/site-packages/pandas/core/window/expanding.py new file mode 100644 index 0000000000000000000000000000000000000000..aac10596ffc699c2b229f959b9c1b26393384b03 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/window/expanding.py @@ -0,0 +1,964 @@ +from __future__ import annotations + +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, +) + +from pandas.util._decorators import ( + deprecate_kwarg, + doc, +) + +from pandas.core.indexers.objects import ( + BaseIndexer, + ExpandingIndexer, + GroupbyIndexer, +) +from pandas.core.window.doc import ( + _shared_docs, + create_section_header, + kwargs_numeric_only, + numba_notes, + template_header, + template_returns, + template_see_also, + window_agg_numba_parameters, + window_apply_parameters, +) +from pandas.core.window.rolling import ( + BaseWindowGroupby, + RollingAndExpandingMixin, +) + +if TYPE_CHECKING: + from pandas._typing import ( + Axis, + QuantileInterpolation, + WindowingRankType, + ) + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.generic import NDFrame + + +class Expanding(RollingAndExpandingMixin): + """ + Provide expanding window calculations. + + Parameters + ---------- + min_periods : int, default 1 + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. + + axis : int or str, default 0 + If ``0`` or ``'index'``, roll across the rows. + + If ``1`` or ``'columns'``, roll across the columns. + + For `Series` this parameter is unused and defaults to 0. 
+ + method : str {'single', 'table'}, default 'single' + Execute the rolling operation per single column or row (``'single'``) + or over the entire object (``'table'``). + + This argument is only implemented when specifying ``engine='numba'`` + in the method call. + + .. versionadded:: 1.3.0 + + Returns + ------- + pandas.api.typing.Expanding + + See Also + -------- + rolling : Provides rolling window calculations. + ewm : Provides exponential weighted functions. + + Notes + ----- + See :ref:`Windowing Operations ` for further usage details + and examples. + + Examples + -------- + >>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + **min_periods** + + Expanding sum with 1 vs 3 observations needed to calculate a value. + + >>> df.expanding(1).sum() + B + 0 0.0 + 1 1.0 + 2 3.0 + 3 3.0 + 4 7.0 + >>> df.expanding(3).sum() + B + 0 NaN + 1 NaN + 2 3.0 + 3 3.0 + 4 7.0 + """ + + _attributes: list[str] = ["min_periods", "axis", "method"] + + def __init__( + self, + obj: NDFrame, + min_periods: int = 1, + axis: Axis = 0, + method: str = "single", + selection=None, + ) -> None: + super().__init__( + obj=obj, + min_periods=min_periods, + axis=axis, + method=method, + selection=selection, + ) + + def _get_window_indexer(self) -> BaseIndexer: + """ + Return an indexer class that will compute the window start and end bounds + """ + return ExpandingIndexer() + + @doc( + _shared_docs["aggregate"], + see_also=dedent( + """ + See Also + -------- + pandas.DataFrame.aggregate : Similar DataFrame method. + pandas.Series.aggregate : Similar Series method. + """ + ), + examples=dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + >>> df + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 + + >>> df.ewm(alpha=0.5).mean() + A B C + 0 1.000000 4.000000 7.000000 + 1 1.666667 4.666667 7.666667 + 2 2.428571 5.428571 8.428571 + """ + ), + klass="Series/Dataframe", + axis="", + ) + def aggregate(self, func, *args, **kwargs): + return super().aggregate(func, *args, **kwargs) + + agg = aggregate + + @doc( + template_header, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().count() + a 1.0 + b 2.0 + c 3.0 + d 4.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="count of non NaN observations", + agg_method="count", + ) + def count(self, numeric_only: bool = False): + return super().count(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + window_apply_parameters, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().apply(lambda s: s.max() - 2 * s.min()) + a -1.0 + b 0.0 + c 1.0 + d 2.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="custom aggregation function", + agg_method="apply", + ) + def apply( + self, + func: Callable[..., Any], + raw: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + args: tuple[Any, ...] 
| None = None, + kwargs: dict[str, Any] | None = None, + ): + return super().apply( + func, + raw=raw, + engine=engine, + engine_kwargs=engine_kwargs, + args=args, + kwargs=kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().sum() + a 1.0 + b 3.0 + c 6.0 + d 10.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="sum", + agg_method="sum", + ) + def sum( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().sum( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([3, 2, 1, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().max() + a 3.0 + b 3.0 + c 3.0 + d 4.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="maximum", + agg_method="max", + ) + def max( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().max( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([2, 3, 4, 1], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().min() + a 2.0 + b 2.0 + c 2.0 + d 1.0 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="minimum", + agg_method="min", + ) + def min( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().min( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().mean() + a 1.0 + b 1.5 + c 2.0 + d 2.5 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="mean", + agg_method="mean", + ) + def mean( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().mean( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + 
template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser.expanding().median() + a 1.0 + b 1.5 + c 2.0 + d 2.5 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="median", + agg_method="median", + ) + def median( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().median( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements.\n + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + window_agg_numba_parameters("1.4"), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "numpy.std : Equivalent method for NumPy array.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + The default ``ddof`` of 1 used in :meth:`Series.std` is different + than the default ``ddof`` of 0 in :func:`numpy.std`. + + A minimum of one period is required for the rolling calculation.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + + >>> s.expanding(3).std() + 0 NaN + 1 NaN + 2 0.577350 + 3 0.957427 + 4 0.894427 + 5 0.836660 + 6 0.786796 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="standard deviation", + agg_method="std", + ) + def std( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().std( + ddof=ddof, + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements.\n + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + window_agg_numba_parameters("1.4"), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "numpy.var : Equivalent method for NumPy array.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + The default ``ddof`` of 1 used in :meth:`Series.var` is different + than the default ``ddof`` of 0 in :func:`numpy.var`. 
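+
+            (Editor's illustration, not part of the upstream docstring.)
+            Concretely, with ``ddof=1`` the expanding variance over ``N``
+            observations is ``sum((x_i - x_bar) ** 2) / (N - 1)``, whereas
+            :func:`numpy.var` divides by ``N`` by default.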
+ + A minimum of one period is required for the rolling calculation.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + + >>> s.expanding(3).var() + 0 NaN + 1 NaN + 2 0.333333 + 3 0.916667 + 4 0.800000 + 5 0.700000 + 6 0.619048 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="variance", + agg_method="var", + ) + def var( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().var( + ddof=ddof, + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements.\n + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + "A minimum of one period is required for the calculation.\n\n", + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([0, 1, 2, 3]) + + >>> s.expanding().sem() + 0 NaN + 1 0.707107 + 2 0.707107 + 3 0.745356 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="standard error of mean", + agg_method="sem", + ) + def sem(self, ddof: int = 1, numeric_only: bool = False): + return super().sem(ddof=ddof, numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "scipy.stats.skew : Third moment of a probability density.\n", + template_see_also, + create_section_header("Notes"), + "A minimum of three periods is required for the rolling calculation.\n\n", + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([-1, 0, 2, -1, 2], index=['a', 'b', 'c', 'd', 'e']) + >>> ser.expanding().skew() + a NaN + b NaN + c 0.935220 + d 1.414214 + e 0.315356 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="unbiased skewness", + agg_method="skew", + ) + def skew(self, numeric_only: bool = False): + return super().skew(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "scipy.stats.kurtosis : Reference SciPy method.\n", + template_see_also, + create_section_header("Notes"), + "A minimum of four periods is required for the calculation.\n\n", + create_section_header("Examples"), + dedent( + """ + The example below will show a rolling calculation with a window size of + four matching the equivalent function call using `scipy.stats`. 
+ + >>> arr = [1, 2, 3, 4, 999] + >>> import scipy.stats + >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}") + -1.200000 + >>> print(f"{{scipy.stats.kurtosis(arr, bias=False):.6f}}") + 4.999874 + >>> s = pd.Series(arr) + >>> s.expanding(4).kurt() + 0 NaN + 1 NaN + 2 NaN + 3 -1.200000 + 4 4.999874 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="Fisher's definition of kurtosis without bias", + agg_method="kurt", + ) + def kurt(self, numeric_only: bool = False): + return super().kurt(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + quantile : float + Quantile to compute. 0 <= quantile <= 1. + + .. deprecated:: 2.1.0 + This will be renamed to 'q' in a future version. + interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}} + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j`: + + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4, 5, 6], index=['a', 'b', 'c', 'd', 'e', 'f']) + >>> ser.expanding(min_periods=4).quantile(.25) + a NaN + b NaN + c NaN + d 1.75 + e 2.00 + f 2.25 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="quantile", + agg_method="quantile", + ) + @deprecate_kwarg(old_arg_name="quantile", new_arg_name="q") + def quantile( + self, + q: float, + interpolation: QuantileInterpolation = "linear", + numeric_only: bool = False, + ): + return super().quantile( + q=q, + interpolation=interpolation, + numeric_only=numeric_only, + ) + + @doc( + template_header, + ".. versionadded:: 1.4.0 \n\n", + create_section_header("Parameters"), + dedent( + """ + method : {{'average', 'min', 'max'}}, default 'average' + How to rank the group of records that have the same value (i.e. ties): + + * average: average rank of the group + * min: lowest rank in the group + * max: highest rank in the group + + ascending : bool, default True + Whether or not the elements should be ranked in ascending order. + pct : bool, default False + Whether or not to display the returned rankings in percentile + form. 
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([1, 4, 2, 3, 5, 3]) + >>> s.expanding().rank() + 0 1.0 + 1 2.0 + 2 2.0 + 3 3.0 + 4 5.0 + 5 3.5 + dtype: float64 + + >>> s.expanding().rank(method="max") + 0 1.0 + 1 2.0 + 2 2.0 + 3 3.0 + 4 5.0 + 5 4.0 + dtype: float64 + + >>> s.expanding().rank(method="min") + 0 1.0 + 1 2.0 + 2 2.0 + 3 3.0 + 4 5.0 + 5 3.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="expanding", + aggregation_description="rank", + agg_method="rank", + ) + def rank( + self, + method: WindowingRankType = "average", + ascending: bool = True, + pct: bool = False, + numeric_only: bool = False, + ): + return super().rank( + method=method, + ascending=ascending, + pct=pct, + numeric_only=numeric_only, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndexed DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd']) + >>> ser1.expanding().cov(ser2) + a NaN + b 0.500000 + c 1.500000 + d 3.333333 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="sample covariance", + agg_method="cov", + ) + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + return super().cov( + other=other, + pairwise=pairwise, + ddof=ddof, + numeric_only=numeric_only, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndexed DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + dedent( + """ + cov : Similar method to calculate covariance. + numpy.corrcoef : NumPy Pearson's correlation calculation. + """ + ).replace("\n", "", 1), + template_see_also, + create_section_header("Notes"), + dedent( + """ + This function uses Pearson's definition of correlation + (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient). 
+ + When `other` is not specified, the output will be self correlation (e.g. + all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise` + set to `True`. + + Function will return ``NaN`` for correlations of equal valued sequences; + this is the result of a 0/0 division error. + + When `pairwise` is set to `False`, only matching columns between `self` and + `other` will be used. + + When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame + with the original index on the first level, and the `other` DataFrame + columns on the second level. + + In the case of missing elements, only complete pairwise observations + will be used.\n + """ + ), + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd']) + >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd']) + >>> ser1.expanding().corr(ser2) + a NaN + b 1.000000 + c 0.981981 + d 0.975900 + dtype: float64 + """ + ), + window_method="expanding", + aggregation_description="correlation", + agg_method="corr", + ) + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + return super().corr( + other=other, + pairwise=pairwise, + ddof=ddof, + numeric_only=numeric_only, + ) + + +class ExpandingGroupby(BaseWindowGroupby, Expanding): + """ + Provide a expanding groupby implementation. + """ + + _attributes = Expanding._attributes + BaseWindowGroupby._attributes + + def _get_window_indexer(self) -> GroupbyIndexer: + """ + Return an indexer class that will compute the window start and end bounds + + Returns + ------- + GroupbyIndexer + """ + window_indexer = GroupbyIndexer( + groupby_indices=self._grouper.indices, + window_indexer=ExpandingIndexer, + ) + return window_indexer diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/numba_.py b/venv/lib/python3.10/site-packages/pandas/core/window/numba_.py new file mode 100644 index 0000000000000000000000000000000000000000..9357945e78c631a3fada24ec3015ca0cf183b99c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/window/numba_.py @@ -0,0 +1,351 @@ +from __future__ import annotations + +import functools +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.util.numba_ import jit_user_function + +if TYPE_CHECKING: + from pandas._typing import Scalar + + +@functools.cache +def generate_numba_apply_func( + func: Callable[..., Scalar], + nopython: bool, + nogil: bool, + parallel: bool, +): + """ + Generate a numba jitted apply function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a rolling apply function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the rolling apply function. 
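+
+    For illustration only (editor's sketch, not part of the upstream
+    docstring), this generator is what backs a numba-engine rolling apply
+    such as:
+
+    >>> import pandas as pd
+    >>> s = pd.Series(range(10), dtype="float64")
+    >>> s.rolling(3).apply(
+    ...     lambda x: x.sum(),
+    ...     raw=True,
+    ...     engine="numba",
+    ...     engine_kwargs={"nopython": True, "nogil": False, "parallel": False},
+    ... )  # doctest: +SKIP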
+ + Parameters + ---------- + func : function + function to be applied to each window and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def roll_apply( + values: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + minimum_periods: int, + *args: Any, + ) -> np.ndarray: + result = np.empty(len(begin)) + for i in numba.prange(len(result)): + start = begin[i] + stop = end[i] + window = values[start:stop] + count_nan = np.sum(np.isnan(window)) + if len(window) - count_nan >= minimum_periods: + result[i] = numba_func(window, *args) + else: + result[i] = np.nan + return result + + return roll_apply + + +@functools.cache +def generate_numba_ewm_func( + nopython: bool, + nogil: bool, + parallel: bool, + com: float, + adjust: bool, + ignore_na: bool, + deltas: tuple, + normalize: bool, +): + """ + Generate a numba jitted ewm mean or sum function specified by values + from engine_kwargs. + + Parameters + ---------- + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + com : float + adjust : bool + ignore_na : bool + deltas : tuple + normalize : bool + + Returns + ------- + Numba function + """ + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def ewm( + values: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + minimum_periods: int, + ) -> np.ndarray: + result = np.empty(len(values)) + alpha = 1.0 / (1.0 + com) + old_wt_factor = 1.0 - alpha + new_wt = 1.0 if adjust else alpha + + for i in numba.prange(len(begin)): + start = begin[i] + stop = end[i] + window = values[start:stop] + sub_result = np.empty(len(window)) + + weighted = window[0] + nobs = int(not np.isnan(weighted)) + sub_result[0] = weighted if nobs >= minimum_periods else np.nan + old_wt = 1.0 + + for j in range(1, len(window)): + cur = window[j] + is_observation = not np.isnan(cur) + nobs += is_observation + if not np.isnan(weighted): + if is_observation or not ignore_na: + if normalize: + # note that len(deltas) = len(vals) - 1 and deltas[i] + # is to be used in conjunction with vals[i+1] + old_wt *= old_wt_factor ** deltas[start + j - 1] + else: + weighted = old_wt_factor * weighted + if is_observation: + if normalize: + # avoid numerical errors on constant series + if weighted != cur: + weighted = old_wt * weighted + new_wt * cur + if normalize: + weighted = weighted / (old_wt + new_wt) + if adjust: + old_wt += new_wt + else: + old_wt = 1.0 + else: + weighted += cur + elif is_observation: + weighted = cur + + sub_result[j] = weighted if nobs >= minimum_periods else np.nan + + result[start:stop] = sub_result + + return result + + return ewm + + +@functools.cache +def generate_numba_table_func( + func: Callable[..., np.ndarray], + nopython: bool, + nogil: bool, + parallel: bool, +): + """ + Generate a numba jitted function to apply window calculations table-wise. + + Func will be passed a M window size x N number of columns array, and + must return a 1 x N number of columns array. 
Func is intended to operate + row-wise, but the result will be transposed for axis=1. + + 1. jit the user's function + 2. Return a rolling apply function with the jitted function inline + + Parameters + ---------- + func : function + function to be applied to each window and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def roll_table( + values: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + minimum_periods: int, + *args: Any, + ): + result = np.empty((len(begin), values.shape[1])) + min_periods_mask = np.empty(result.shape) + for i in numba.prange(len(result)): + start = begin[i] + stop = end[i] + window = values[start:stop] + count_nan = np.sum(np.isnan(window), axis=0) + sub_result = numba_func(window, *args) + nan_mask = len(window) - count_nan >= minimum_periods + min_periods_mask[i, :] = nan_mask + result[i, :] = sub_result + result = np.where(min_periods_mask, result, np.nan) + return result + + return roll_table + + +# This function will no longer be needed once numba supports +# axis for all np.nan* agg functions +# https://github.com/numba/numba/issues/1269 +@functools.cache +def generate_manual_numpy_nan_agg_with_axis(nan_func): + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=True, nogil=True, parallel=True) + def nan_agg_with_axis(table): + result = np.empty(table.shape[1]) + for i in numba.prange(table.shape[1]): + partition = table[:, i] + result[i] = nan_func(partition) + return result + + return nan_agg_with_axis + + +@functools.cache +def generate_numba_ewm_table_func( + nopython: bool, + nogil: bool, + parallel: bool, + com: float, + adjust: bool, + ignore_na: bool, + deltas: tuple, + normalize: bool, +): + """ + Generate a numba jitted ewm mean or sum function applied table wise specified + by values from engine_kwargs. 
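+
+    For illustration only (editor's sketch, not part of the upstream
+    docstring), this table-wise variant is selected when ``method="table"``
+    is combined with the numba engine, e.g.:
+
+    >>> import pandas as pd
+    >>> df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
+    >>> df.ewm(com=0.5, method="table").mean(engine="numba")  # doctest: +SKIP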
+ + Parameters + ---------- + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + com : float + adjust : bool + ignore_na : bool + deltas : tuple + normalize: bool + + Returns + ------- + Numba function + """ + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def ewm_table( + values: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + minimum_periods: int, + ) -> np.ndarray: + alpha = 1.0 / (1.0 + com) + old_wt_factor = 1.0 - alpha + new_wt = 1.0 if adjust else alpha + old_wt = np.ones(values.shape[1]) + + result = np.empty(values.shape) + weighted = values[0].copy() + nobs = (~np.isnan(weighted)).astype(np.int64) + result[0] = np.where(nobs >= minimum_periods, weighted, np.nan) + for i in range(1, len(values)): + cur = values[i] + is_observations = ~np.isnan(cur) + nobs += is_observations.astype(np.int64) + for j in numba.prange(len(cur)): + if not np.isnan(weighted[j]): + if is_observations[j] or not ignore_na: + if normalize: + # note that len(deltas) = len(vals) - 1 and deltas[i] + # is to be used in conjunction with vals[i+1] + old_wt[j] *= old_wt_factor ** deltas[i - 1] + else: + weighted[j] = old_wt_factor * weighted[j] + if is_observations[j]: + if normalize: + # avoid numerical errors on constant series + if weighted[j] != cur[j]: + weighted[j] = ( + old_wt[j] * weighted[j] + new_wt * cur[j] + ) + if normalize: + weighted[j] = weighted[j] / (old_wt[j] + new_wt) + if adjust: + old_wt[j] += new_wt + else: + old_wt[j] = 1.0 + else: + weighted[j] += cur[j] + elif is_observations[j]: + weighted[j] = cur[j] + + result[i] = np.where(nobs >= minimum_periods, weighted, np.nan) + + return result + + return ewm_table diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/online.py b/venv/lib/python3.10/site-packages/pandas/core/window/online.py new file mode 100644 index 0000000000000000000000000000000000000000..29d1f740e021fd30d3bfc7d72a5e6baefe62ac4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/window/online.py @@ -0,0 +1,118 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + + +def generate_online_numba_ewma_func( + nopython: bool, + nogil: bool, + parallel: bool, +): + """ + Generate a numba jitted groupby ewma function specified by values + from engine_kwargs. + + Parameters + ---------- + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def online_ewma( + values: np.ndarray, + deltas: np.ndarray, + minimum_periods: int, + old_wt_factor: float, + new_wt: float, + old_wt: np.ndarray, + adjust: bool, + ignore_na: bool, + ): + """ + Compute online exponentially weighted mean per column over 2D values. + + Takes the first observation as is, then computes the subsequent + exponentially weighted mean accounting minimum periods. 
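+
+        Editor's note (illustrative, not part of the upstream docstring): per
+        column ``j``, an observed value ``cur[j]`` updates the running mean as
+
+            weighted_avg[j] = (old_wt[j] * weighted_avg[j] + new_wt * cur[j])
+                              / (old_wt[j] + new_wt)
+
+        after which ``old_wt[j]`` grows by ``new_wt`` when ``adjust`` is True
+        and is reset to 1.0 otherwise. Missing values leave the state
+        unchanged when ``ignore_na`` is True; when it is False the old weight
+        still decays by ``old_wt_factor ** deltas``.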
+ """ + result = np.empty(values.shape) + weighted_avg = values[0].copy() + nobs = (~np.isnan(weighted_avg)).astype(np.int64) + result[0] = np.where(nobs >= minimum_periods, weighted_avg, np.nan) + + for i in range(1, len(values)): + cur = values[i] + is_observations = ~np.isnan(cur) + nobs += is_observations.astype(np.int64) + for j in numba.prange(len(cur)): + if not np.isnan(weighted_avg[j]): + if is_observations[j] or not ignore_na: + # note that len(deltas) = len(vals) - 1 and deltas[i] is to be + # used in conjunction with vals[i+1] + old_wt[j] *= old_wt_factor ** deltas[j - 1] + if is_observations[j]: + # avoid numerical errors on constant series + if weighted_avg[j] != cur[j]: + weighted_avg[j] = ( + (old_wt[j] * weighted_avg[j]) + (new_wt * cur[j]) + ) / (old_wt[j] + new_wt) + if adjust: + old_wt[j] += new_wt + else: + old_wt[j] = 1.0 + elif is_observations[j]: + weighted_avg[j] = cur[j] + + result[i] = np.where(nobs >= minimum_periods, weighted_avg, np.nan) + + return result, old_wt + + return online_ewma + + +class EWMMeanState: + def __init__(self, com, adjust, ignore_na, axis, shape) -> None: + alpha = 1.0 / (1.0 + com) + self.axis = axis + self.shape = shape + self.adjust = adjust + self.ignore_na = ignore_na + self.new_wt = 1.0 if adjust else alpha + self.old_wt_factor = 1.0 - alpha + self.old_wt = np.ones(self.shape[self.axis - 1]) + self.last_ewm = None + + def run_ewm(self, weighted_avg, deltas, min_periods, ewm_func): + result, old_wt = ewm_func( + weighted_avg, + deltas, + min_periods, + self.old_wt_factor, + self.new_wt, + self.old_wt, + self.adjust, + self.ignore_na, + ) + self.old_wt = old_wt + self.last_ewm = result[-1] + return result + + def reset(self) -> None: + self.old_wt = np.ones(self.shape[self.axis - 1]) + self.last_ewm = None diff --git a/venv/lib/python3.10/site-packages/pandas/core/window/rolling.py b/venv/lib/python3.10/site-packages/pandas/core/window/rolling.py new file mode 100644 index 0000000000000000000000000000000000000000..68cec16ec9eca8b34fadd9e67221219ebd7fe736 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/core/window/rolling.py @@ -0,0 +1,2930 @@ +""" +Provide a generic structure to support window functions, +similar to how we have a Groupby object. 
+""" +from __future__ import annotations + +import copy +from datetime import timedelta +from functools import partial +import inspect +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, +) + +import numpy as np + +from pandas._libs.tslibs import ( + BaseOffset, + Timedelta, + to_offset, +) +import pandas._libs.window.aggregations as window_aggregations +from pandas.compat._optional import import_optional_dependency +from pandas.errors import DataError +from pandas.util._decorators import ( + deprecate_kwarg, + doc, +) + +from pandas.core.dtypes.common import ( + ensure_float64, + is_bool, + is_integer, + is_numeric_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ArrowDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import notna + +from pandas.core._numba import executor +from pandas.core.algorithms import factorize +from pandas.core.apply import ResamplerWindowApply +from pandas.core.arrays import ExtensionArray +from pandas.core.base import SelectionMixin +import pandas.core.common as com +from pandas.core.indexers.objects import ( + BaseIndexer, + FixedWindowIndexer, + GroupbyIndexer, + VariableWindowIndexer, +) +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + MultiIndex, + PeriodIndex, + TimedeltaIndex, +) +from pandas.core.reshape.concat import concat +from pandas.core.util.numba_ import ( + get_jit_arguments, + maybe_use_numba, +) +from pandas.core.window.common import ( + flex_binary_moment, + zsqrt, +) +from pandas.core.window.doc import ( + _shared_docs, + create_section_header, + kwargs_numeric_only, + kwargs_scipy, + numba_notes, + template_header, + template_returns, + template_see_also, + window_agg_numba_parameters, + window_apply_parameters, +) +from pandas.core.window.numba_ import ( + generate_manual_numpy_nan_agg_with_axis, + generate_numba_apply_func, + generate_numba_table_func, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterator, + Sized, + ) + + from pandas._typing import ( + ArrayLike, + Axis, + NDFrameT, + QuantileInterpolation, + WindowingRankType, + npt, + ) + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.generic import NDFrame + from pandas.core.groupby.ops import BaseGrouper + +from pandas.core.arrays.datetimelike import dtype_to_unit + + +class BaseWindow(SelectionMixin): + """Provides utilities for performing windowing operations.""" + + _attributes: list[str] = [] + exclusions: frozenset[Hashable] = frozenset() + _on: Index + + def __init__( + self, + obj: NDFrame, + window=None, + min_periods: int | None = None, + center: bool | None = False, + win_type: str | None = None, + axis: Axis = 0, + on: str | Index | None = None, + closed: str | None = None, + step: int | None = None, + method: str = "single", + *, + selection=None, + ) -> None: + self.obj = obj + self.on = on + self.closed = closed + self.step = step + self.window = window + self.min_periods = min_periods + self.center = center + self.win_type = win_type + self.axis = obj._get_axis_number(axis) if axis is not None else None + self.method = method + self._win_freq_i8: int | None = None + if self.on is None: + if self.axis == 0: + self._on = self.obj.index + else: + # i.e. 
self.axis == 1 + self._on = self.obj.columns + elif isinstance(self.on, Index): + self._on = self.on + elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns: + self._on = Index(self.obj[self.on]) + else: + raise ValueError( + f"invalid on specified as {self.on}, " + "must be a column (of DataFrame), an Index or None" + ) + + self._selection = selection + self._validate() + + def _validate(self) -> None: + if self.center is not None and not is_bool(self.center): + raise ValueError("center must be a boolean") + if self.min_periods is not None: + if not is_integer(self.min_periods): + raise ValueError("min_periods must be an integer") + if self.min_periods < 0: + raise ValueError("min_periods must be >= 0") + if is_integer(self.window) and self.min_periods > self.window: + raise ValueError( + f"min_periods {self.min_periods} must be <= window {self.window}" + ) + if self.closed is not None and self.closed not in [ + "right", + "both", + "left", + "neither", + ]: + raise ValueError("closed must be 'right', 'left', 'both' or 'neither'") + if not isinstance(self.obj, (ABCSeries, ABCDataFrame)): + raise TypeError(f"invalid type: {type(self)}") + if isinstance(self.window, BaseIndexer): + # Validate that the passed BaseIndexer subclass has + # a get_window_bounds with the correct signature. + get_window_bounds_signature = inspect.signature( + self.window.get_window_bounds + ).parameters.keys() + expected_signature = inspect.signature( + BaseIndexer().get_window_bounds + ).parameters.keys() + if get_window_bounds_signature != expected_signature: + raise ValueError( + f"{type(self.window).__name__} does not implement " + f"the correct signature for get_window_bounds" + ) + if self.method not in ["table", "single"]: + raise ValueError("method must be 'table' or 'single") + if self.step is not None: + if not is_integer(self.step): + raise ValueError("step must be an integer") + if self.step < 0: + raise ValueError("step must be >= 0") + + def _check_window_bounds( + self, start: np.ndarray, end: np.ndarray, num_vals: int + ) -> None: + if len(start) != len(end): + raise ValueError( + f"start ({len(start)}) and end ({len(end)}) bounds must be the " + f"same length" + ) + if len(start) != (num_vals + (self.step or 1) - 1) // (self.step or 1): + raise ValueError( + f"start and end bounds ({len(start)}) must be the same length " + f"as the object ({num_vals}) divided by the step ({self.step}) " + f"if given and rounded up" + ) + + def _slice_axis_for_step(self, index: Index, result: Sized | None = None) -> Index: + """ + Slices the index for a given result and the preset step. + """ + return ( + index + if result is None or len(result) == len(index) + else index[:: self.step] + ) + + def _validate_numeric_only(self, name: str, numeric_only: bool) -> None: + """ + Validate numeric_only argument, raising if invalid for the input. + + Parameters + ---------- + name : str + Name of the operator (kernel). + numeric_only : bool + Value passed by user. + """ + if ( + self._selected_obj.ndim == 1 + and numeric_only + and not is_numeric_dtype(self._selected_obj.dtype) + ): + raise NotImplementedError( + f"{type(self).__name__}.{name} does not implement numeric_only" + ) + + def _make_numeric_only(self, obj: NDFrameT) -> NDFrameT: + """Subset DataFrame to numeric columns. + + Parameters + ---------- + obj : DataFrame + + Returns + ------- + obj subset to numeric-only columns. 
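+
+        Illustration (editor's sketch, not part of the upstream docstring):
+        the subset keeps numeric columns and drops everything else, e.g.
+
+        >>> import pandas as pd
+        >>> df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
+        >>> df.select_dtypes(include=["number"], exclude=["timedelta"]).columns.tolist()
+        ['a']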
+ """ + result = obj.select_dtypes(include=["number"], exclude=["timedelta"]) + return result + + def _create_data(self, obj: NDFrameT, numeric_only: bool = False) -> NDFrameT: + """ + Split data into blocks & return conformed data. + """ + # filter out the on from the object + if self.on is not None and not isinstance(self.on, Index) and obj.ndim == 2: + obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False) + if obj.ndim > 1 and (numeric_only or self.axis == 1): + # GH: 20649 in case of mixed dtype and axis=1 we have to convert everything + # to float to calculate the complete row at once. We exclude all non-numeric + # dtypes. + obj = self._make_numeric_only(obj) + if self.axis == 1: + obj = obj.astype("float64", copy=False) + obj._mgr = obj._mgr.consolidate() + return obj + + def _gotitem(self, key, ndim, subset=None): + """ + Sub-classes to define. Return a sliced object. + + Parameters + ---------- + key : str / list of selections + ndim : {1, 2} + requested ndim of result + subset : object, default None + subset to act on + """ + # create a new object to prevent aliasing + if subset is None: + subset = self.obj + + # we need to make a shallow copy of ourselves + # with the same groupby + kwargs = {attr: getattr(self, attr) for attr in self._attributes} + + selection = self._infer_selection(key, subset) + new_win = type(self)(subset, selection=selection, **kwargs) + return new_win + + def __getattr__(self, attr: str): + if attr in self._internal_names_set: + return object.__getattribute__(self, attr) + if attr in self.obj: + return self[attr] + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{attr}'" + ) + + def _dir_additions(self): + return self.obj._dir_additions() + + def __repr__(self) -> str: + """ + Provide a nice str repr of our rolling object. 
+ """ + attrs_list = ( + f"{attr_name}={getattr(self, attr_name)}" + for attr_name in self._attributes + if getattr(self, attr_name, None) is not None and attr_name[0] != "_" + ) + attrs = ",".join(attrs_list) + return f"{type(self).__name__} [{attrs}]" + + def __iter__(self) -> Iterator: + obj = self._selected_obj.set_axis(self._on) + obj = self._create_data(obj) + indexer = self._get_window_indexer() + + start, end = indexer.get_window_bounds( + num_values=len(obj), + min_periods=self.min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(obj)) + + for s, e in zip(start, end): + result = obj.iloc[slice(s, e)] + yield result + + def _prep_values(self, values: ArrayLike) -> np.ndarray: + """Convert input to numpy arrays for Cython routines""" + if needs_i8_conversion(values.dtype): + raise NotImplementedError( + f"ops for {type(self).__name__} for this " + f"dtype {values.dtype} are not implemented" + ) + # GH #12373 : rolling functions error on float32 data + # make sure the data is coerced to float64 + try: + if isinstance(values, ExtensionArray): + values = values.to_numpy(np.float64, na_value=np.nan) + else: + values = ensure_float64(values) + except (ValueError, TypeError) as err: + raise TypeError(f"cannot handle this type -> {values.dtype}") from err + + # Convert inf to nan for C funcs + inf = np.isinf(values) + if inf.any(): + values = np.where(inf, np.nan, values) + + return values + + def _insert_on_column(self, result: DataFrame, obj: DataFrame) -> None: + # if we have an 'on' column we want to put it back into + # the results in the same location + from pandas import Series + + if self.on is not None and not self._on.equals(obj.index): + name = self._on.name + extra_col = Series(self._on, index=self.obj.index, name=name, copy=False) + if name in result.columns: + # TODO: sure we want to overwrite results? + result[name] = extra_col + elif name in result.index.names: + pass + elif name in self._selected_obj.columns: + # insert in the same location as we had in _selected_obj + old_cols = self._selected_obj.columns + new_cols = result.columns + old_loc = old_cols.get_loc(name) + overlap = new_cols.intersection(old_cols[:old_loc]) + new_loc = len(overlap) + result.insert(new_loc, name, extra_col) + else: + # insert at the end + result[name] = extra_col + + @property + def _index_array(self) -> npt.NDArray[np.int64] | None: + # TODO: why do we get here with e.g. MultiIndex? 
+ if isinstance(self._on, (PeriodIndex, DatetimeIndex, TimedeltaIndex)): + return self._on.asi8 + elif isinstance(self._on.dtype, ArrowDtype) and self._on.dtype.kind in "mM": + return self._on.to_numpy(dtype=np.int64) + return None + + def _resolve_output(self, out: DataFrame, obj: DataFrame) -> DataFrame: + """Validate and finalize result.""" + if out.shape[1] == 0 and obj.shape[1] > 0: + raise DataError("No numeric types to aggregate") + if out.shape[1] == 0: + return obj.astype("float64") + + self._insert_on_column(out, obj) + return out + + def _get_window_indexer(self) -> BaseIndexer: + """ + Return an indexer class that will compute the window start and end bounds + """ + if isinstance(self.window, BaseIndexer): + return self.window + if self._win_freq_i8 is not None: + return VariableWindowIndexer( + index_array=self._index_array, + window_size=self._win_freq_i8, + center=self.center, + ) + return FixedWindowIndexer(window_size=self.window) + + def _apply_series( + self, homogeneous_func: Callable[..., ArrayLike], name: str | None = None + ) -> Series: + """ + Series version of _apply_columnwise + """ + obj = self._create_data(self._selected_obj) + + if name == "count": + # GH 12541: Special case for count where we support date-like types + obj = notna(obj).astype(int) + try: + values = self._prep_values(obj._values) + except (TypeError, NotImplementedError) as err: + raise DataError("No numeric types to aggregate") from err + + result = homogeneous_func(values) + index = self._slice_axis_for_step(obj.index, result) + return obj._constructor(result, index=index, name=obj.name) + + def _apply_columnwise( + self, + homogeneous_func: Callable[..., ArrayLike], + name: str, + numeric_only: bool = False, + ) -> DataFrame | Series: + """ + Apply the given function to the DataFrame broken down into homogeneous + sub-frames. 
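`_get_window_indexer` above picks between a fixed-size indexer, a frequency-based variable indexer, and a user-supplied `BaseIndexer`; whichever is chosen, `get_window_bounds` must hand back parallel `start`/`end` position arrays for the kernels to consume. A toy sketch of that contract using only the public `pandas.api.indexers.BaseIndexer` (the subclass name here is made up for the example):

    import numpy as np
    import pandas as pd
    from pandas.api.indexers import BaseIndexer

    class TrailingPairIndexer(BaseIndexer):
        # Each window is the current row plus the one before it, expressed
        # as the (start, end) arrays that _apply feeds to the aggregation.
        def get_window_bounds(self, num_values=0, min_periods=None,
                              center=None, closed=None, step=None):
            end = np.arange(1, num_values + 1, dtype=np.int64)
            start = np.clip(end - 2, 0, None)
            return start, end

    ser = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    print(ser.rolling(TrailingPairIndexer(), min_periods=1).sum())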
+ """ + self._validate_numeric_only(name, numeric_only) + if self._selected_obj.ndim == 1: + return self._apply_series(homogeneous_func, name) + + obj = self._create_data(self._selected_obj, numeric_only) + if name == "count": + # GH 12541: Special case for count where we support date-like types + obj = notna(obj).astype(int) + obj._mgr = obj._mgr.consolidate() + + if self.axis == 1: + obj = obj.T + + taker = [] + res_values = [] + for i, arr in enumerate(obj._iter_column_arrays()): + # GH#42736 operate column-wise instead of block-wise + # As of 2.0, hfunc will raise for nuisance columns + try: + arr = self._prep_values(arr) + except (TypeError, NotImplementedError) as err: + raise DataError( + f"Cannot aggregate non-numeric type: {arr.dtype}" + ) from err + res = homogeneous_func(arr) + res_values.append(res) + taker.append(i) + + index = self._slice_axis_for_step( + obj.index, res_values[0] if len(res_values) > 0 else None + ) + df = type(obj)._from_arrays( + res_values, + index=index, + columns=obj.columns.take(taker), + verify_integrity=False, + ) + + if self.axis == 1: + df = df.T + + return self._resolve_output(df, obj) + + def _apply_tablewise( + self, + homogeneous_func: Callable[..., ArrayLike], + name: str | None = None, + numeric_only: bool = False, + ) -> DataFrame | Series: + """ + Apply the given function to the DataFrame across the entire object + """ + if self._selected_obj.ndim == 1: + raise ValueError("method='table' not applicable for Series objects.") + obj = self._create_data(self._selected_obj, numeric_only) + values = self._prep_values(obj.to_numpy()) + values = values.T if self.axis == 1 else values + result = homogeneous_func(values) + result = result.T if self.axis == 1 else result + index = self._slice_axis_for_step(obj.index, result) + columns = ( + obj.columns + if result.shape[1] == len(obj.columns) + else obj.columns[:: self.step] + ) + out = obj._constructor(result, index=index, columns=columns) + + return self._resolve_output(out, obj) + + def _apply_pairwise( + self, + target: DataFrame | Series, + other: DataFrame | Series | None, + pairwise: bool | None, + func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series], + numeric_only: bool, + ) -> DataFrame | Series: + """ + Apply the given pairwise function given 2 pandas objects (DataFrame/Series) + """ + target = self._create_data(target, numeric_only) + if other is None: + other = target + # only default unset + pairwise = True if pairwise is None else pairwise + elif not isinstance(other, (ABCDataFrame, ABCSeries)): + raise ValueError("other must be a DataFrame or Series") + elif other.ndim == 2 and numeric_only: + other = self._make_numeric_only(other) + + return flex_binary_moment(target, other, func, pairwise=bool(pairwise)) + + def _apply( + self, + func: Callable[..., Any], + name: str, + numeric_only: bool = False, + numba_args: tuple[Any, ...] = (), + **kwargs, + ): + """ + Rolling statistical measure using supplied function. + + Designed to be used with passed-in Cython array-based functions. 
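`_apply_columnwise` above works column by column and, as the GH#42736 comment notes, raises for nuisance (non-numeric) columns rather than silently dropping them; passing `numeric_only=True` routes through `_make_numeric_only` first so only numeric columns reach the kernel. A small sketch of both behaviours:

    import pandas as pd
    from pandas.errors import DataError

    df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": ["x", "y", "z"]})

    # The object-dtype column is rejected by the column-wise kernel ...
    try:
        df.rolling(2).sum()
    except DataError as err:
        print("raised:", err)

    # ... unless numeric_only=True drops it up front.
    print(df.rolling(2).sum(numeric_only=True))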
+ + Parameters + ---------- + func : callable function to apply + name : str, + numba_args : tuple + args to be passed when func is a numba func + **kwargs + additional arguments for rolling function and window function + + Returns + ------- + y : type of input + """ + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + + def homogeneous_func(values: np.ndarray): + # calculation function + + if values.size == 0: + return values.copy() + + def calc(x): + start, end = window_indexer.get_window_bounds( + num_values=len(x), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(x)) + + return func(x, start, end, min_periods, *numba_args) + + with np.errstate(all="ignore"): + result = calc(values) + + return result + + if self.method == "single": + return self._apply_columnwise(homogeneous_func, name, numeric_only) + else: + return self._apply_tablewise(homogeneous_func, name, numeric_only) + + def _numba_apply( + self, + func: Callable[..., Any], + engine_kwargs: dict[str, bool] | None = None, + **func_kwargs, + ): + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + obj = self._create_data(self._selected_obj) + if self.axis == 1: + obj = obj.T + values = self._prep_values(obj.to_numpy()) + if values.ndim == 1: + values = values.reshape(-1, 1) + start, end = window_indexer.get_window_bounds( + num_values=len(values), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(values)) + # For now, map everything to float to match the Cython impl + # even though it is wrong + # TODO: Could preserve correct dtypes in future + # xref #53214 + dtype_mapping = executor.float_dtype_mapping + aggregator = executor.generate_shared_aggregator( + func, + dtype_mapping, + is_grouped_kernel=False, + **get_jit_arguments(engine_kwargs), + ) + result = aggregator( + values.T, start=start, end=end, min_periods=min_periods, **func_kwargs + ).T + result = result.T if self.axis == 1 else result + index = self._slice_axis_for_step(obj.index, result) + if obj.ndim == 1: + result = result.squeeze() + out = obj._constructor(result, index=index, name=obj.name) + return out + else: + columns = self._slice_axis_for_step(obj.columns, result.T) + out = obj._constructor(result, index=index, columns=columns) + return self._resolve_output(out, obj) + + def aggregate(self, func, *args, **kwargs): + result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() + if result is None: + return self.apply(func, raw=False, args=args, kwargs=kwargs) + return result + + agg = aggregate + + +class BaseWindowGroupby(BaseWindow): + """ + Provide the groupby windowing facilities. + """ + + _grouper: BaseGrouper + _as_index: bool + _attributes: list[str] = ["_grouper"] + + def __init__( + self, + obj: DataFrame | Series, + *args, + _grouper: BaseGrouper, + _as_index: bool = True, + **kwargs, + ) -> None: + from pandas.core.groupby.ops import BaseGrouper + + if not isinstance(_grouper, BaseGrouper): + raise ValueError("Must pass a BaseGrouper object.") + self._grouper = _grouper + self._as_index = _as_index + # GH 32262: It's convention to keep the grouping column in + # groupby., but unexpected to users in + # groupby.rolling. 
+ obj = obj.drop(columns=self._grouper.names, errors="ignore") + # GH 15354 + if kwargs.get("step") is not None: + raise NotImplementedError("step not implemented for groupby") + super().__init__(obj, *args, **kwargs) + + def _apply( + self, + func: Callable[..., Any], + name: str, + numeric_only: bool = False, + numba_args: tuple[Any, ...] = (), + **kwargs, + ) -> DataFrame | Series: + result = super()._apply( + func, + name, + numeric_only, + numba_args, + **kwargs, + ) + # Reconstruct the resulting MultiIndex + # 1st set of levels = group by labels + # 2nd set of levels = original DataFrame/Series index + grouped_object_index = self.obj.index + grouped_index_name = [*grouped_object_index.names] + groupby_keys = copy.copy(self._grouper.names) + result_index_names = groupby_keys + grouped_index_name + + drop_columns = [ + key + for key in self._grouper.names + if key not in self.obj.index.names or key is None + ] + + if len(drop_columns) != len(groupby_keys): + # Our result will have still kept the column in the result + result = result.drop(columns=drop_columns, errors="ignore") + + codes = self._grouper.codes + levels = copy.copy(self._grouper.levels) + + group_indices = self._grouper.indices.values() + if group_indices: + indexer = np.concatenate(list(group_indices)) + else: + indexer = np.array([], dtype=np.intp) + codes = [c.take(indexer) for c in codes] + + # if the index of the original dataframe needs to be preserved, append + # this index (but reordered) to the codes/levels from the groupby + if grouped_object_index is not None: + idx = grouped_object_index.take(indexer) + if not isinstance(idx, MultiIndex): + idx = MultiIndex.from_arrays([idx]) + codes.extend(list(idx.codes)) + levels.extend(list(idx.levels)) + + result_index = MultiIndex( + levels, codes, names=result_index_names, verify_integrity=False + ) + + result.index = result_index + if not self._as_index: + result = result.reset_index(level=list(range(len(groupby_keys)))) + return result + + def _apply_pairwise( + self, + target: DataFrame | Series, + other: DataFrame | Series | None, + pairwise: bool | None, + func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series], + numeric_only: bool, + ) -> DataFrame | Series: + """ + Apply the given pairwise function given 2 pandas objects (DataFrame/Series) + """ + # Manually drop the grouping column first + target = target.drop(columns=self._grouper.names, errors="ignore") + result = super()._apply_pairwise(target, other, pairwise, func, numeric_only) + # 1) Determine the levels + codes of the groupby levels + if other is not None and not all( + len(group) == len(other) for group in self._grouper.indices.values() + ): + # GH 42915 + # len(other) != len(any group), so must reindex (expand) the result + # from flex_binary_moment to a "transform"-like result + # per groupby combination + old_result_len = len(result) + result = concat( + [ + result.take(gb_indices).reindex(result.index) + for gb_indices in self._grouper.indices.values() + ] + ) + + gb_pairs = ( + com.maybe_make_list(pair) for pair in self._grouper.indices.keys() + ) + groupby_codes = [] + groupby_levels = [] + # e.g. 
[[1, 2], [4, 5]] as [[1, 4], [2, 5]] + for gb_level_pair in map(list, zip(*gb_pairs)): + labels = np.repeat(np.array(gb_level_pair), old_result_len) + codes, levels = factorize(labels) + groupby_codes.append(codes) + groupby_levels.append(levels) + else: + # pairwise=True or len(other) == len(each group), so repeat + # the groupby labels by the number of columns in the original object + groupby_codes = self._grouper.codes + # error: Incompatible types in assignment (expression has type + # "List[Index]", variable has type "List[Union[ndarray, Index]]") + groupby_levels = self._grouper.levels # type: ignore[assignment] + + group_indices = self._grouper.indices.values() + if group_indices: + indexer = np.concatenate(list(group_indices)) + else: + indexer = np.array([], dtype=np.intp) + + if target.ndim == 1: + repeat_by = 1 + else: + repeat_by = len(target.columns) + groupby_codes = [ + np.repeat(c.take(indexer), repeat_by) for c in groupby_codes + ] + # 2) Determine the levels + codes of the result from super()._apply_pairwise + if isinstance(result.index, MultiIndex): + result_codes = list(result.index.codes) + result_levels = list(result.index.levels) + result_names = list(result.index.names) + else: + idx_codes, idx_levels = factorize(result.index) + result_codes = [idx_codes] + result_levels = [idx_levels] + result_names = [result.index.name] + + # 3) Create the resulting index by combining 1) + 2) + result_codes = groupby_codes + result_codes + result_levels = groupby_levels + result_levels + result_names = self._grouper.names + result_names + + result_index = MultiIndex( + result_levels, result_codes, names=result_names, verify_integrity=False + ) + result.index = result_index + return result + + def _create_data(self, obj: NDFrameT, numeric_only: bool = False) -> NDFrameT: + """ + Split data into blocks & return conformed data. + """ + # Ensure the object we're rolling over is monotonically sorted relative + # to the groups + # GH 36197 + if not obj.empty: + groupby_order = np.concatenate(list(self._grouper.indices.values())).astype( + np.int64 + ) + obj = obj.take(groupby_order) + return super()._create_data(obj, numeric_only) + + def _gotitem(self, key, ndim, subset=None): + # we are setting the index on the actual object + # here so our index is carried through to the selected obj + # when we do the splitting for the groupby + if self.on is not None: + # GH 43355 + subset = self.obj.set_index(self._on) + return super()._gotitem(key, ndim, subset=subset) + + +class Window(BaseWindow): + """ + Provide rolling window calculations. + + Parameters + ---------- + window : int, timedelta, str, offset, or BaseIndexer subclass + Size of the moving window. + + If an integer, the fixed number of observations used for + each window. + + If a timedelta, str, or offset, the time period of each window. Each + window will be a variable sized based on the observations included in + the time-period. This is only valid for datetimelike indexes. + To learn more about the offsets & frequency strings, please see `this link + `__. + + If a BaseIndexer subclass, the window boundaries + based on the defined ``get_window_bounds`` method. Additional rolling + keyword arguments, namely ``min_periods``, ``center``, ``closed`` and + ``step`` will be passed to ``get_window_bounds``. + + min_periods : int, default None + Minimum number of observations in window required to have a value; + otherwise, result is ``np.nan``. + + For a window that is specified by an offset, ``min_periods`` will default to 1. 
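`BaseWindowGroupby._apply` above rebuilds the result index by stacking the group keys on top of the original row labels, while the grouping columns themselves are dropped from the aggregated values (GH 32262). A usage sketch of the resulting shape:

    import pandas as pd

    df = pd.DataFrame({"g": ["a", "a", "b", "b"], "v": [1.0, 2.0, 3.0, 4.0]})

    # MultiIndex result: (group key, original row label); "g" is not aggregated.
    print(df.groupby("g").rolling(2).sum())

    # With as_index=False the group keys are reset back into columns,
    # mirroring the _as_index branch at the end of _apply.
    print(df.groupby("g", as_index=False).rolling(2).sum())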
+ + For a window that is specified by an integer, ``min_periods`` will default + to the size of the window. + + center : bool, default False + If False, set the window labels as the right edge of the window index. + + If True, set the window labels as the center of the window index. + + win_type : str, default None + If ``None``, all points are evenly weighted. + + If a string, it must be a valid `scipy.signal window function + `__. + + Certain Scipy window types require additional parameters to be passed + in the aggregation function. The additional parameters must match + the keywords specified in the Scipy window type method signature. + + on : str, optional + For a DataFrame, a column label or Index level on which + to calculate the rolling window, rather than the DataFrame's index. + + Provided integer column is ignored and excluded from result since + an integer index is not used to calculate the rolling window. + + axis : int or str, default 0 + If ``0`` or ``'index'``, roll across the rows. + + If ``1`` or ``'columns'``, roll across the columns. + + For `Series` this parameter is unused and defaults to 0. + + .. deprecated:: 2.1.0 + + The axis keyword is deprecated. For ``axis=1``, + transpose the DataFrame first instead. + + closed : str, default None + If ``'right'``, the first point in the window is excluded from calculations. + + If ``'left'``, the last point in the window is excluded from calculations. + + If ``'both'``, the no points in the window are excluded from calculations. + + If ``'neither'``, the first and last points in the window are excluded + from calculations. + + Default ``None`` (``'right'``). + + step : int, default None + + .. versionadded:: 1.5.0 + + Evaluate the window at every ``step`` result, equivalent to slicing as + ``[::step]``. ``window`` must be an integer. Using a step argument other + than None or 1 will produce a result with a different shape than the input. + + method : str {'single', 'table'}, default 'single' + + .. versionadded:: 1.3.0 + + Execute the rolling operation per single column or row (``'single'``) + or over the entire object (``'table'``). + + This argument is only implemented when specifying ``engine='numba'`` + in the method call. + + Returns + ------- + pandas.api.typing.Window or pandas.api.typing.Rolling + An instance of Window is returned if ``win_type`` is passed. Otherwise, + an instance of Rolling is returned. + + See Also + -------- + expanding : Provides expanding transformations. + ewm : Provides exponential weighted functions. + + Notes + ----- + See :ref:`Windowing Operations ` for further usage details + and examples. + + Examples + -------- + >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}) + >>> df + B + 0 0.0 + 1 1.0 + 2 2.0 + 3 NaN + 4 4.0 + + **window** + + Rolling sum with a window length of 2 observations. + + >>> df.rolling(2).sum() + B + 0 NaN + 1 1.0 + 2 3.0 + 3 NaN + 4 NaN + + Rolling sum with a window span of 2 seconds. + + >>> df_time = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}, + ... index=[pd.Timestamp('20130101 09:00:00'), + ... pd.Timestamp('20130101 09:00:02'), + ... pd.Timestamp('20130101 09:00:03'), + ... pd.Timestamp('20130101 09:00:05'), + ... 
pd.Timestamp('20130101 09:00:06')]) + + >>> df_time + B + 2013-01-01 09:00:00 0.0 + 2013-01-01 09:00:02 1.0 + 2013-01-01 09:00:03 2.0 + 2013-01-01 09:00:05 NaN + 2013-01-01 09:00:06 4.0 + + >>> df_time.rolling('2s').sum() + B + 2013-01-01 09:00:00 0.0 + 2013-01-01 09:00:02 1.0 + 2013-01-01 09:00:03 3.0 + 2013-01-01 09:00:05 NaN + 2013-01-01 09:00:06 4.0 + + Rolling sum with forward looking windows with 2 observations. + + >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2) + >>> df.rolling(window=indexer, min_periods=1).sum() + B + 0 1.0 + 1 3.0 + 2 2.0 + 3 4.0 + 4 4.0 + + **min_periods** + + Rolling sum with a window length of 2 observations, but only needs a minimum of 1 + observation to calculate a value. + + >>> df.rolling(2, min_periods=1).sum() + B + 0 0.0 + 1 1.0 + 2 3.0 + 3 2.0 + 4 4.0 + + **center** + + Rolling sum with the result assigned to the center of the window index. + + >>> df.rolling(3, min_periods=1, center=True).sum() + B + 0 1.0 + 1 3.0 + 2 3.0 + 3 6.0 + 4 4.0 + + >>> df.rolling(3, min_periods=1, center=False).sum() + B + 0 0.0 + 1 1.0 + 2 3.0 + 3 3.0 + 4 6.0 + + **step** + + Rolling sum with a window length of 2 observations, minimum of 1 observation to + calculate a value, and a step of 2. + + >>> df.rolling(2, min_periods=1, step=2).sum() + B + 0 0.0 + 2 3.0 + 4 4.0 + + **win_type** + + Rolling sum with a window length of 2, using the Scipy ``'gaussian'`` + window type. ``std`` is required in the aggregation function. + + >>> df.rolling(2, win_type='gaussian').sum(std=3) + B + 0 NaN + 1 0.986207 + 2 2.958621 + 3 NaN + 4 NaN + + **on** + + Rolling sum with a window length of 2 days. + + >>> df = pd.DataFrame({ + ... 'A': [pd.to_datetime('2020-01-01'), + ... pd.to_datetime('2020-01-01'), + ... pd.to_datetime('2020-01-02'),], + ... 'B': [1, 2, 3], }, + ... index=pd.date_range('2020', periods=3)) + + >>> df + A B + 2020-01-01 2020-01-01 1 + 2020-01-02 2020-01-01 2 + 2020-01-03 2020-01-02 3 + + >>> df.rolling('2D', on='A').sum() + A B + 2020-01-01 2020-01-01 1.0 + 2020-01-02 2020-01-01 3.0 + 2020-01-03 2020-01-02 6.0 + """ + + _attributes = [ + "window", + "min_periods", + "center", + "win_type", + "axis", + "on", + "closed", + "step", + "method", + ] + + def _validate(self): + super()._validate() + + if not isinstance(self.win_type, str): + raise ValueError(f"Invalid win_type {self.win_type}") + signal = import_optional_dependency( + "scipy.signal.windows", extra="Scipy is required to generate window weight." + ) + self._scipy_weight_generator = getattr(signal, self.win_type, None) + if self._scipy_weight_generator is None: + raise ValueError(f"Invalid win_type {self.win_type}") + + if isinstance(self.window, BaseIndexer): + raise NotImplementedError( + "BaseIndexer subclasses not implemented with win_types." + ) + if not is_integer(self.window) or self.window < 0: + raise ValueError("window must be an integer 0 or greater") + + if self.method != "single": + raise NotImplementedError("'single' is the only supported method type.") + + def _center_window(self, result: np.ndarray, offset: int) -> np.ndarray: + """ + Center the result in the window for weighted rolling aggregations. + """ + if offset > 0: + lead_indexer = [slice(offset, None)] + result = np.copy(result[tuple(lead_indexer)]) + return result + + def _apply( + self, + func: Callable[[np.ndarray, int, int], np.ndarray], + name: str, + numeric_only: bool = False, + numba_args: tuple[Any, ...] = (), + **kwargs, + ): + """ + Rolling with weights statistical measure using supplied function. 
+ + Designed to be used with passed-in Cython array-based functions. + + Parameters + ---------- + func : callable function to apply + name : str, + numeric_only : bool, default False + Whether to only operate on bool, int, and float columns + numba_args : tuple + unused + **kwargs + additional arguments for scipy windows if necessary + + Returns + ------- + y : type of input + """ + # "None" not callable [misc] + window = self._scipy_weight_generator( # type: ignore[misc] + self.window, **kwargs + ) + offset = (len(window) - 1) // 2 if self.center else 0 + + def homogeneous_func(values: np.ndarray): + # calculation function + + if values.size == 0: + return values.copy() + + def calc(x): + additional_nans = np.array([np.nan] * offset) + x = np.concatenate((x, additional_nans)) + return func( + x, + window, + self.min_periods if self.min_periods is not None else len(window), + ) + + with np.errstate(all="ignore"): + # Our weighted aggregations return memoryviews + result = np.asarray(calc(values)) + + if self.center: + result = self._center_window(result, offset) + + return result + + return self._apply_columnwise(homogeneous_func, name, numeric_only)[ + :: self.step + ] + + @doc( + _shared_docs["aggregate"], + see_also=dedent( + """ + See Also + -------- + pandas.DataFrame.aggregate : Similar DataFrame method. + pandas.Series.aggregate : Similar Series method. + """ + ), + examples=dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + >>> df + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 + + >>> df.rolling(2, win_type="boxcar").agg("mean") + A B C + 0 NaN NaN NaN + 1 1.5 4.5 7.5 + 2 2.5 5.5 8.5 + """ + ), + klass="Series/DataFrame", + axis="", + ) + def aggregate(self, func, *args, **kwargs): + result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() + if result is None: + # these must apply directly + result = func(self) + + return result + + agg = aggregate + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + kwargs_scipy, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([0, 1, 5, 2, 8]) + + To get an instance of :class:`~pandas.core.window.rolling.Window` we need + to pass the parameter `win_type`. + + >>> type(ser.rolling(2, win_type='gaussian')) + + + In order to use the `SciPy` Gaussian window we need to provide the parameters + `M` and `std`. The parameter `M` corresponds to 2 in our example. 
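`Window._apply` above builds the weight vector by calling the named `scipy.signal.windows` function with the window length and forwarding any extra keywords, so the 0.986207 in the earlier ``win_type='gaussian'`` example is simply the dot product of ``[0, 1]`` with the gaussian(2, std=3) weights. A short check, assuming SciPy is installed:

    import numpy as np
    import pandas as pd
    from scipy.signal import windows

    # The weight vector Window._apply hands to roll_weighted_sum:
    w = windows.gaussian(2, std=3)
    print(w)                      # roughly [0.986207, 0.986207]
    print(np.dot([0.0, 1.0], w))  # roughly 0.986207

    ser = pd.Series([0.0, 1.0, 2.0])
    print(ser.rolling(2, win_type="gaussian").sum(std=3))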
+ We pass the second parameter `std` as a parameter of the following method + (`sum` in this case): + + >>> ser.rolling(2, win_type='gaussian').sum(std=3) + 0 NaN + 1 0.986207 + 2 5.917243 + 3 6.903450 + 4 9.862071 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="weighted window sum", + agg_method="sum", + ) + def sum(self, numeric_only: bool = False, **kwargs): + window_func = window_aggregations.roll_weighted_sum + # error: Argument 1 to "_apply" of "Window" has incompatible type + # "Callable[[ndarray, ndarray, int], ndarray]"; expected + # "Callable[[ndarray, int, int], ndarray]" + return self._apply( + window_func, # type: ignore[arg-type] + name="sum", + numeric_only=numeric_only, + **kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + kwargs_scipy, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([0, 1, 5, 2, 8]) + + To get an instance of :class:`~pandas.core.window.rolling.Window` we need + to pass the parameter `win_type`. + + >>> type(ser.rolling(2, win_type='gaussian')) + + + In order to use the `SciPy` Gaussian window we need to provide the parameters + `M` and `std`. The parameter `M` corresponds to 2 in our example. + We pass the second parameter `std` as a parameter of the following method: + + >>> ser.rolling(2, win_type='gaussian').mean(std=3) + 0 NaN + 1 0.5 + 2 3.0 + 3 3.5 + 4 5.0 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="weighted window mean", + agg_method="mean", + ) + def mean(self, numeric_only: bool = False, **kwargs): + window_func = window_aggregations.roll_weighted_mean + # error: Argument 1 to "_apply" of "Window" has incompatible type + # "Callable[[ndarray, ndarray, int], ndarray]"; expected + # "Callable[[ndarray, int, int], ndarray]" + return self._apply( + window_func, # type: ignore[arg-type] + name="mean", + numeric_only=numeric_only, + **kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + kwargs_scipy, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([0, 1, 5, 2, 8]) + + To get an instance of :class:`~pandas.core.window.rolling.Window` we need + to pass the parameter `win_type`. + + >>> type(ser.rolling(2, win_type='gaussian')) + + + In order to use the `SciPy` Gaussian window we need to provide the parameters + `M` and `std`. The parameter `M` corresponds to 2 in our example. 
+ We pass the second parameter `std` as a parameter of the following method: + + >>> ser.rolling(2, win_type='gaussian').var(std=3) + 0 NaN + 1 0.5 + 2 8.0 + 3 4.5 + 4 18.0 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="weighted window variance", + agg_method="var", + ) + def var(self, ddof: int = 1, numeric_only: bool = False, **kwargs): + window_func = partial(window_aggregations.roll_weighted_var, ddof=ddof) + kwargs.pop("name", None) + return self._apply(window_func, name="var", numeric_only=numeric_only, **kwargs) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + kwargs_scipy, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([0, 1, 5, 2, 8]) + + To get an instance of :class:`~pandas.core.window.rolling.Window` we need + to pass the parameter `win_type`. + + >>> type(ser.rolling(2, win_type='gaussian')) + + + In order to use the `SciPy` Gaussian window we need to provide the parameters + `M` and `std`. The parameter `M` corresponds to 2 in our example. + We pass the second parameter `std` as a parameter of the following method: + + >>> ser.rolling(2, win_type='gaussian').std(std=3) + 0 NaN + 1 0.707107 + 2 2.828427 + 3 2.121320 + 4 4.242641 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="weighted window standard deviation", + agg_method="std", + ) + def std(self, ddof: int = 1, numeric_only: bool = False, **kwargs): + return zsqrt( + self.var(ddof=ddof, name="std", numeric_only=numeric_only, **kwargs) + ) + + +class RollingAndExpandingMixin(BaseWindow): + def count(self, numeric_only: bool = False): + window_func = window_aggregations.roll_sum + return self._apply(window_func, name="count", numeric_only=numeric_only) + + def apply( + self, + func: Callable[..., Any], + raw: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + args: tuple[Any, ...] | None = None, + kwargs: dict[str, Any] | None = None, + ): + if args is None: + args = () + if kwargs is None: + kwargs = {} + + if not is_bool(raw): + raise ValueError("raw parameter must be `True` or `False`") + + numba_args: tuple[Any, ...] 
= () + if maybe_use_numba(engine): + if raw is False: + raise ValueError("raw must be `True` when using the numba engine") + numba_args = args + if self.method == "single": + apply_func = generate_numba_apply_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + else: + apply_func = generate_numba_table_func( + func, **get_jit_arguments(engine_kwargs, kwargs) + ) + elif engine in ("cython", None): + if engine_kwargs is not None: + raise ValueError("cython engine does not accept engine_kwargs") + apply_func = self._generate_cython_apply_func(args, kwargs, raw, func) + else: + raise ValueError("engine must be either 'numba' or 'cython'") + + return self._apply( + apply_func, + name="apply", + numba_args=numba_args, + ) + + def _generate_cython_apply_func( + self, + args: tuple[Any, ...], + kwargs: dict[str, Any], + raw: bool | np.bool_, + function: Callable[..., Any], + ) -> Callable[[np.ndarray, np.ndarray, np.ndarray, int], np.ndarray]: + from pandas import Series + + window_func = partial( + window_aggregations.roll_apply, + args=args, + kwargs=kwargs, + raw=raw, + function=function, + ) + + def apply_func(values, begin, end, min_periods, raw=raw): + if not raw: + # GH 45912 + values = Series(values, index=self._on, copy=False) + return window_func(values, begin, end, min_periods) + + return apply_func + + def sum( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nansum) + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, + ) + else: + from pandas.core._numba.kernels import sliding_sum + + return self._numba_apply(sliding_sum, engine_kwargs) + window_func = window_aggregations.roll_sum + return self._apply(window_func, name="sum", numeric_only=numeric_only) + + def max( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nanmax) + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, + ) + else: + from pandas.core._numba.kernels import sliding_min_max + + return self._numba_apply(sliding_min_max, engine_kwargs, is_max=True) + window_func = window_aggregations.roll_max + return self._apply(window_func, name="max", numeric_only=numeric_only) + + def min( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nanmin) + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, + ) + else: + from pandas.core._numba.kernels import sliding_min_max + + return self._numba_apply(sliding_min_max, engine_kwargs, is_max=False) + window_func = window_aggregations.roll_min + return self._apply(window_func, name="min", numeric_only=numeric_only) + + def mean( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nanmean) + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, 
+ ) + else: + from pandas.core._numba.kernels import sliding_mean + + return self._numba_apply(sliding_mean, engine_kwargs) + window_func = window_aggregations.roll_mean + return self._apply(window_func, name="mean", numeric_only=numeric_only) + + def median( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + func = generate_manual_numpy_nan_agg_with_axis(np.nanmedian) + else: + func = np.nanmedian + + return self.apply( + func, + raw=True, + engine=engine, + engine_kwargs=engine_kwargs, + ) + window_func = window_aggregations.roll_median_c + return self._apply(window_func, name="median", numeric_only=numeric_only) + + def std( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + raise NotImplementedError("std not supported with method='table'") + from pandas.core._numba.kernels import sliding_var + + return zsqrt(self._numba_apply(sliding_var, engine_kwargs, ddof=ddof)) + window_func = window_aggregations.roll_var + + def zsqrt_func(values, begin, end, min_periods): + return zsqrt(window_func(values, begin, end, min_periods, ddof=ddof)) + + return self._apply( + zsqrt_func, + name="std", + numeric_only=numeric_only, + ) + + def var( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + if maybe_use_numba(engine): + if self.method == "table": + raise NotImplementedError("var not supported with method='table'") + from pandas.core._numba.kernels import sliding_var + + return self._numba_apply(sliding_var, engine_kwargs, ddof=ddof) + window_func = partial(window_aggregations.roll_var, ddof=ddof) + return self._apply( + window_func, + name="var", + numeric_only=numeric_only, + ) + + def skew(self, numeric_only: bool = False): + window_func = window_aggregations.roll_skew + return self._apply( + window_func, + name="skew", + numeric_only=numeric_only, + ) + + def sem(self, ddof: int = 1, numeric_only: bool = False): + # Raise here so error message says sem instead of std + self._validate_numeric_only("sem", numeric_only) + return self.std(numeric_only=numeric_only) / ( + self.count(numeric_only=numeric_only) - ddof + ).pow(0.5) + + def kurt(self, numeric_only: bool = False): + window_func = window_aggregations.roll_kurt + return self._apply( + window_func, + name="kurt", + numeric_only=numeric_only, + ) + + def quantile( + self, + q: float, + interpolation: QuantileInterpolation = "linear", + numeric_only: bool = False, + ): + if q == 1.0: + window_func = window_aggregations.roll_max + elif q == 0.0: + window_func = window_aggregations.roll_min + else: + window_func = partial( + window_aggregations.roll_quantile, + quantile=q, + interpolation=interpolation, + ) + + return self._apply(window_func, name="quantile", numeric_only=numeric_only) + + def rank( + self, + method: WindowingRankType = "average", + ascending: bool = True, + pct: bool = False, + numeric_only: bool = False, + ): + window_func = partial( + window_aggregations.roll_rank, + method=method, + ascending=ascending, + percentile=pct, + ) + + return self._apply(window_func, name="rank", numeric_only=numeric_only) + + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + 
ddof: int = 1, + numeric_only: bool = False, + ): + if self.step is not None: + raise NotImplementedError("step not implemented for cov") + self._validate_numeric_only("cov", numeric_only) + + from pandas import Series + + def cov_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + start, end = window_indexer.get_window_bounds( + num_values=len(x_array), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(x_array)) + + with np.errstate(all="ignore"): + mean_x_y = window_aggregations.roll_mean( + x_array * y_array, start, end, min_periods + ) + mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods) + mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods) + count_x_y = window_aggregations.roll_sum( + notna(x_array + y_array).astype(np.float64), start, end, 0 + ) + result = (mean_x_y - mean_x * mean_y) * (count_x_y / (count_x_y - ddof)) + return Series(result, index=x.index, name=x.name, copy=False) + + return self._apply_pairwise( + self._selected_obj, other, pairwise, cov_func, numeric_only + ) + + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + if self.step is not None: + raise NotImplementedError("step not implemented for corr") + self._validate_numeric_only("corr", numeric_only) + + from pandas import Series + + def corr_func(x, y): + x_array = self._prep_values(x) + y_array = self._prep_values(y) + window_indexer = self._get_window_indexer() + min_periods = ( + self.min_periods + if self.min_periods is not None + else window_indexer.window_size + ) + start, end = window_indexer.get_window_bounds( + num_values=len(x_array), + min_periods=min_periods, + center=self.center, + closed=self.closed, + step=self.step, + ) + self._check_window_bounds(start, end, len(x_array)) + + with np.errstate(all="ignore"): + mean_x_y = window_aggregations.roll_mean( + x_array * y_array, start, end, min_periods + ) + mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods) + mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods) + count_x_y = window_aggregations.roll_sum( + notna(x_array + y_array).astype(np.float64), start, end, 0 + ) + x_var = window_aggregations.roll_var( + x_array, start, end, min_periods, ddof + ) + y_var = window_aggregations.roll_var( + y_array, start, end, min_periods, ddof + ) + numerator = (mean_x_y - mean_x * mean_y) * ( + count_x_y / (count_x_y - ddof) + ) + denominator = (x_var * y_var) ** 0.5 + result = numerator / denominator + return Series(result, index=x.index, name=x.name, copy=False) + + return self._apply_pairwise( + self._selected_obj, other, pairwise, corr_func, numeric_only + ) + + +class Rolling(RollingAndExpandingMixin): + _attributes: list[str] = [ + "window", + "min_periods", + "center", + "win_type", + "axis", + "on", + "closed", + "step", + "method", + ] + + def _validate(self): + super()._validate() + + # we allow rolling on a datetimelike index + if ( + self.obj.empty + or isinstance(self._on, (DatetimeIndex, TimedeltaIndex, PeriodIndex)) + or (isinstance(self._on.dtype, ArrowDtype) and self._on.dtype.kind in "mM") + ) and isinstance(self.window, (str, BaseOffset, timedelta)): + self._validate_datetimelike_monotonic() + + # this 
will raise ValueError on non-fixed freqs + try: + freq = to_offset(self.window) + except (TypeError, ValueError) as err: + raise ValueError( + f"passed window {self.window} is not " + "compatible with a datetimelike index" + ) from err + if isinstance(self._on, PeriodIndex): + # error: Incompatible types in assignment (expression has type + # "float", variable has type "Optional[int]") + self._win_freq_i8 = freq.nanos / ( # type: ignore[assignment] + self._on.freq.nanos / self._on.freq.n + ) + else: + try: + unit = dtype_to_unit(self._on.dtype) # type: ignore[arg-type] + except TypeError: + # if not a datetime dtype, eg for empty dataframes + unit = "ns" + self._win_freq_i8 = Timedelta(freq.nanos).as_unit(unit)._value + + # min_periods must be an integer + if self.min_periods is None: + self.min_periods = 1 + + if self.step is not None: + raise NotImplementedError( + "step is not supported with frequency windows" + ) + + elif isinstance(self.window, BaseIndexer): + # Passed BaseIndexer subclass should handle all other rolling kwargs + pass + elif not is_integer(self.window) or self.window < 0: + raise ValueError("window must be an integer 0 or greater") + + def _validate_datetimelike_monotonic(self) -> None: + """ + Validate self._on is monotonic (increasing or decreasing) and has + no NaT values for frequency windows. + """ + if self._on.hasnans: + self._raise_monotonic_error("values must not have NaT") + if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing): + self._raise_monotonic_error("values must be monotonic") + + def _raise_monotonic_error(self, msg: str): + on = self.on + if on is None: + if self.axis == 0: + on = "index" + else: + on = "column" + raise ValueError(f"{on} {msg}") + + @doc( + _shared_docs["aggregate"], + see_also=dedent( + """ + See Also + -------- + pandas.Series.rolling : Calling object with Series data. + pandas.DataFrame.rolling : Calling object with DataFrame data. 
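For offset-based windows, `Rolling._validate` above converts the frequency into an integer width in index units (`_win_freq_i8`), defaults `min_periods` to 1, and requires a NaT-free, monotonic datetime-like index or `on` column. A small sketch of what that validation accepts and rejects:

    import pandas as pd

    idx = pd.date_range("2024-01-01", periods=4, freq="D")
    ser = pd.Series([1.0, 2.0, 3.0, 4.0], index=idx)

    # Frequency window: min_periods defaults to 1, so no leading NaN.
    print(ser.rolling("2D").sum())

    # A non-monotonic datetime index is rejected by
    # _validate_datetimelike_monotonic.
    try:
        ser.iloc[[2, 0, 1, 3]].rolling("2D").sum()
    except ValueError as err:
        print("raised:", err)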
+ """ + ), + examples=dedent( + """ + Examples + -------- + >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}) + >>> df + A B C + 0 1 4 7 + 1 2 5 8 + 2 3 6 9 + + >>> df.rolling(2).sum() + A B C + 0 NaN NaN NaN + 1 3.0 9.0 15.0 + 2 5.0 11.0 17.0 + + >>> df.rolling(2).agg({"A": "sum", "B": "min"}) + A B + 0 NaN NaN + 1 3.0 4.0 + 2 5.0 5.0 + """ + ), + klass="Series/Dataframe", + axis="", + ) + def aggregate(self, func, *args, **kwargs): + return super().aggregate(func, *args, **kwargs) + + agg = aggregate + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([2, 3, np.nan, 10]) + >>> s.rolling(2).count() + 0 NaN + 1 2.0 + 2 1.0 + 3 1.0 + dtype: float64 + >>> s.rolling(3).count() + 0 NaN + 1 NaN + 2 2.0 + 3 2.0 + dtype: float64 + >>> s.rolling(4).count() + 0 NaN + 1 NaN + 2 NaN + 3 3.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="count of non NaN observations", + agg_method="count", + ) + def count(self, numeric_only: bool = False): + return super().count(numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + window_apply_parameters, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 6, 5, 4]) + >>> ser.rolling(2).apply(lambda s: s.sum() - s.min()) + 0 NaN + 1 6.0 + 2 6.0 + 3 5.0 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="custom aggregation function", + agg_method="apply", + ) + def apply( + self, + func: Callable[..., Any], + raw: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + args: tuple[Any, ...] | None = None, + kwargs: dict[str, Any] | None = None, + ): + return super().apply( + func, + raw=raw, + engine=engine, + engine_kwargs=engine_kwargs, + args=args, + kwargs=kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([1, 2, 3, 4, 5]) + >>> s + 0 1 + 1 2 + 2 3 + 3 4 + 4 5 + dtype: int64 + + >>> s.rolling(3).sum() + 0 NaN + 1 NaN + 2 6.0 + 3 9.0 + 4 12.0 + dtype: float64 + + >>> s.rolling(3, center=True).sum() + 0 NaN + 1 6.0 + 2 9.0 + 3 12.0 + 4 NaN + dtype: float64 + + For DataFrame, each sum is computed column-wise. 
+ + >>> df = pd.DataFrame({{"A": s, "B": s ** 2}}) + >>> df + A B + 0 1 1 + 1 2 4 + 2 3 9 + 3 4 16 + 4 5 25 + + >>> df.rolling(3).sum() + A B + 0 NaN NaN + 1 NaN NaN + 2 6.0 14.0 + 3 9.0 29.0 + 4 12.0 50.0 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="sum", + agg_method="sum", + ) + def sum( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().sum( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 2, 3, 4]) + >>> ser.rolling(2).max() + 0 NaN + 1 2.0 + 2 3.0 + 3 4.0 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="maximum", + agg_method="max", + ) + def max( + self, + numeric_only: bool = False, + *args, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + **kwargs, + ): + return super().max( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """ + Performing a rolling minimum with a window size of 3. + + >>> s = pd.Series([4, 3, 5, 2, 6]) + >>> s.rolling(3).min() + 0 NaN + 1 NaN + 2 3.0 + 3 2.0 + 4 2.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="minimum", + agg_method="min", + ) + def min( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().min( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """ + The below examples will show rolling mean calculations with window sizes of + two and three, respectively. 
+ + >>> s = pd.Series([1, 2, 3, 4]) + >>> s.rolling(2).mean() + 0 NaN + 1 1.5 + 2 2.5 + 3 3.5 + dtype: float64 + + >>> s.rolling(3).mean() + 0 NaN + 1 NaN + 2 2.0 + 3 3.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="mean", + agg_method="mean", + ) + def mean( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().mean( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + window_agg_numba_parameters(), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + numba_notes, + create_section_header("Examples"), + dedent( + """ + Compute the rolling median of a series with a window size of 3. + + >>> s = pd.Series([0, 1, 2, 3, 4]) + >>> s.rolling(3).median() + 0 NaN + 1 NaN + 2 1.0 + 3 2.0 + 4 3.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="median", + agg_method="median", + ) + def median( + self, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().median( + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + window_agg_numba_parameters("1.4"), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "numpy.std : Equivalent method for NumPy array.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + The default ``ddof`` of 1 used in :meth:`Series.std` is different + than the default ``ddof`` of 0 in :func:`numpy.std`. + + A minimum of one period is required for the rolling calculation.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + >>> s.rolling(3).std() + 0 NaN + 1 NaN + 2 0.577350 + 3 1.000000 + 4 1.000000 + 5 1.154701 + 6 0.000000 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="standard deviation", + agg_method="std", + ) + def std( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().std( + ddof=ddof, + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. 
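The numba parameters documented here switch `mean`, `std`, `var` and the other simple aggregations from the Cython routines over to the `sliding_*` kernels in `pandas.core._numba.kernels` (see `_numba_apply` above). A usage sketch; it needs the optional numba dependency installed, and the results are expected to match the default engine:

    import pandas as pd

    ser = pd.Series(range(10), dtype="float64")

    # Dispatches to the numba sliding kernels rather than Cython.
    print(ser.rolling(3).mean(engine="numba", engine_kwargs={"parallel": False}))

    # Default Cython path, same values.
    print(ser.rolling(3).mean())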
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + window_agg_numba_parameters("1.4"), + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "numpy.var : Equivalent method for NumPy array.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + The default ``ddof`` of 1 used in :meth:`Series.var` is different + than the default ``ddof`` of 0 in :func:`numpy.var`. + + A minimum of one period is required for the rolling calculation.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5]) + >>> s.rolling(3).var() + 0 NaN + 1 NaN + 2 0.333333 + 3 1.000000 + 4 1.000000 + 5 1.333333 + 6 0.000000 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="variance", + agg_method="var", + ) + def var( + self, + ddof: int = 1, + numeric_only: bool = False, + engine: Literal["cython", "numba"] | None = None, + engine_kwargs: dict[str, bool] | None = None, + ): + return super().var( + ddof=ddof, + numeric_only=numeric_only, + engine=engine, + engine_kwargs=engine_kwargs, + ) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "scipy.stats.skew : Third moment of a probability density.\n", + template_see_also, + create_section_header("Notes"), + dedent( + """ + A minimum of three periods is required for the rolling calculation.\n + """ + ), + create_section_header("Examples"), + dedent( + """\ + >>> ser = pd.Series([1, 5, 2, 7, 15, 6]) + >>> ser.rolling(3).skew().round(6) + 0 NaN + 1 NaN + 2 1.293343 + 3 -0.585583 + 4 0.670284 + 5 1.652317 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="unbiased skewness", + agg_method="skew", + ) + def skew(self, numeric_only: bool = False): + return super().skew(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. 
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Notes"), + "A minimum of one period is required for the calculation.\n\n", + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([0, 1, 2, 3]) + >>> s.rolling(2, min_periods=1).sem() + 0 NaN + 1 0.707107 + 2 0.707107 + 3 0.707107 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="standard error of mean", + agg_method="sem", + ) + def sem(self, ddof: int = 1, numeric_only: bool = False): + # Raise here so error message says sem instead of std + self._validate_numeric_only("sem", numeric_only) + return self.std(numeric_only=numeric_only) / ( + self.count(numeric_only) - ddof + ).pow(0.5) + + @doc( + template_header, + create_section_header("Parameters"), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + "scipy.stats.kurtosis : Reference SciPy method.\n", + template_see_also, + create_section_header("Notes"), + "A minimum of four periods is required for the calculation.\n\n", + create_section_header("Examples"), + dedent( + """ + The example below will show a rolling calculation with a window size of + four matching the equivalent function call using `scipy.stats`. + + >>> arr = [1, 2, 3, 4, 999] + >>> import scipy.stats + >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}") + -1.200000 + >>> print(f"{{scipy.stats.kurtosis(arr[1:], bias=False):.6f}}") + 3.999946 + >>> s = pd.Series(arr) + >>> s.rolling(4).kurt() + 0 NaN + 1 NaN + 2 NaN + 3 -1.200000 + 4 3.999946 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="Fisher's definition of kurtosis without bias", + agg_method="kurt", + ) + def kurt(self, numeric_only: bool = False): + return super().kurt(numeric_only=numeric_only) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + quantile : float + Quantile to compute. 0 <= quantile <= 1. + + .. deprecated:: 2.1.0 + This will be renamed to 'q' in a future version. + interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}} + This optional parameter specifies the interpolation method to use, + when the desired quantile lies between two data points `i` and `j`: + + * linear: `i + (j - i) * fraction`, where `fraction` is the + fractional part of the index surrounded by `i` and `j`. + * lower: `i`. + * higher: `j`. + * nearest: `i` or `j` whichever is nearest. + * midpoint: (`i` + `j`) / 2. 
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([1, 2, 3, 4]) + >>> s.rolling(2).quantile(.4, interpolation='lower') + 0 NaN + 1 1.0 + 2 2.0 + 3 3.0 + dtype: float64 + + >>> s.rolling(2).quantile(.4, interpolation='midpoint') + 0 NaN + 1 1.5 + 2 2.5 + 3 3.5 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="quantile", + agg_method="quantile", + ) + @deprecate_kwarg(old_arg_name="quantile", new_arg_name="q") + def quantile( + self, + q: float, + interpolation: QuantileInterpolation = "linear", + numeric_only: bool = False, + ): + return super().quantile( + q=q, + interpolation=interpolation, + numeric_only=numeric_only, + ) + + @doc( + template_header, + ".. versionadded:: 1.4.0 \n\n", + create_section_header("Parameters"), + dedent( + """ + method : {{'average', 'min', 'max'}}, default 'average' + How to rank the group of records that have the same value (i.e. ties): + + * average: average rank of the group + * min: lowest rank in the group + * max: highest rank in the group + + ascending : bool, default True + Whether or not the elements should be ranked in ascending order. + pct : bool, default False + Whether or not to display the returned rankings in percentile + form. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """ + >>> s = pd.Series([1, 4, 2, 3, 5, 3]) + >>> s.rolling(3).rank() + 0 NaN + 1 NaN + 2 2.0 + 3 2.0 + 4 3.0 + 5 1.5 + dtype: float64 + + >>> s.rolling(3).rank(method="max") + 0 NaN + 1 NaN + 2 2.0 + 3 2.0 + 4 3.0 + 5 2.0 + dtype: float64 + + >>> s.rolling(3).rank(method="min") + 0 NaN + 1 NaN + 2 2.0 + 3 2.0 + 4 3.0 + 5 1.0 + dtype: float64 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="rank", + agg_method="rank", + ) + def rank( + self, + method: WindowingRankType = "average", + ascending: bool = True, + pct: bool = False, + numeric_only: bool = False, + ): + return super().rank( + method=method, + ascending=ascending, + pct=pct, + numeric_only=numeric_only, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndexed DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. 
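The ``N - ddof`` divisor described here is what `cov_func` above applies as the ``count_x_y / (count_x_y - ddof)`` rescaling of ``E[xy] - E[x]E[y]``. A hand check of a single window against that formula (the local names below are just for the sketch):

    import numpy as np
    import pandas as pd

    s1 = pd.Series([3.0, 5.0, 8.0, 2.0])
    s2 = pd.Series([4.0, 4.0, 8.0, 1.0])

    # Last size-3 window, computed the way cov_func does it.
    x = s1.tail(3).to_numpy()
    y = s2.tail(3).to_numpy()
    n, ddof = 3, 1
    manual = (np.mean(x * y) - x.mean() * y.mean()) * (n / (n - ddof))

    print(manual)
    print(s1.rolling(3).cov(s2).iloc[-1])  # same value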
+ """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + template_see_also, + create_section_header("Examples"), + dedent( + """\ + >>> ser1 = pd.Series([1, 2, 3, 4]) + >>> ser2 = pd.Series([1, 4, 5, 8]) + >>> ser1.rolling(2).cov(ser2) + 0 NaN + 1 1.5 + 2 0.5 + 3 1.5 + dtype: float64 + """ + ), + window_method="rolling", + aggregation_description="sample covariance", + agg_method="cov", + ) + def cov( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + return super().cov( + other=other, + pairwise=pairwise, + ddof=ddof, + numeric_only=numeric_only, + ) + + @doc( + template_header, + create_section_header("Parameters"), + dedent( + """ + other : Series or DataFrame, optional + If not supplied then will default to self and produce pairwise + output. + pairwise : bool, default None + If False then only matching columns between self and other will be + used and the output will be a DataFrame. + If True then all pairwise combinations will be calculated and the + output will be a MultiIndexed DataFrame in the case of DataFrame + inputs. In the case of missing elements, only complete pairwise + observations will be used. + ddof : int, default 1 + Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + """ + ).replace("\n", "", 1), + kwargs_numeric_only, + create_section_header("Returns"), + template_returns, + create_section_header("See Also"), + dedent( + """ + cov : Similar method to calculate covariance. + numpy.corrcoef : NumPy Pearson's correlation calculation. + """ + ).replace("\n", "", 1), + template_see_also, + create_section_header("Notes"), + dedent( + """ + This function uses Pearson's definition of correlation + (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient). + + When `other` is not specified, the output will be self correlation (e.g. + all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise` + set to `True`. + + Function will return ``NaN`` for correlations of equal valued sequences; + this is the result of a 0/0 division error. + + When `pairwise` is set to `False`, only matching columns between `self` and + `other` will be used. + + When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame + with the original index on the first level, and the `other` DataFrame + columns on the second level. + + In the case of missing elements, only complete pairwise observations + will be used.\n + """ + ).replace("\n", "", 1), + create_section_header("Examples"), + dedent( + """ + The below example shows a rolling calculation with a window size of + four matching the equivalent function call using :meth:`numpy.corrcoef`. + + >>> v1 = [3, 3, 3, 5, 8] + >>> v2 = [3, 4, 4, 4, 8] + >>> np.corrcoef(v1[:-1], v2[:-1]) + array([[1. , 0.33333333], + [0.33333333, 1. ]]) + >>> np.corrcoef(v1[1:], v2[1:]) + array([[1. , 0.9169493], + [0.9169493, 1. ]]) + >>> s1 = pd.Series(v1) + >>> s2 = pd.Series(v2) + >>> s1.rolling(4).corr(s2) + 0 NaN + 1 NaN + 2 NaN + 3 0.333333 + 4 0.916949 + dtype: float64 + + The below example shows a similar rolling calculation on a + DataFrame using the pairwise option. + + >>> matrix = np.array([[51., 35.], + ... [49., 30.], + ... [47., 32.], + ... [46., 31.], + ... [50., 36.]]) + >>> np.corrcoef(matrix[:-1, 0], matrix[:-1, 1]) + array([[1. , 0.6263001], + [0.6263001, 1. 
]]) + >>> np.corrcoef(matrix[1:, 0], matrix[1:, 1]) + array([[1. , 0.55536811], + [0.55536811, 1. ]]) + >>> df = pd.DataFrame(matrix, columns=['X', 'Y']) + >>> df + X Y + 0 51.0 35.0 + 1 49.0 30.0 + 2 47.0 32.0 + 3 46.0 31.0 + 4 50.0 36.0 + >>> df.rolling(4).corr(pairwise=True) + X Y + 0 X NaN NaN + Y NaN NaN + 1 X NaN NaN + Y NaN NaN + 2 X NaN NaN + Y NaN NaN + 3 X 1.000000 0.626300 + Y 0.626300 1.000000 + 4 X 1.000000 0.555368 + Y 0.555368 1.000000 + """ + ).replace("\n", "", 1), + window_method="rolling", + aggregation_description="correlation", + agg_method="corr", + ) + def corr( + self, + other: DataFrame | Series | None = None, + pairwise: bool | None = None, + ddof: int = 1, + numeric_only: bool = False, + ): + return super().corr( + other=other, + pairwise=pairwise, + ddof=ddof, + numeric_only=numeric_only, + ) + + +Rolling.__doc__ = Window.__doc__ + + +class RollingGroupby(BaseWindowGroupby, Rolling): + """ + Provide a rolling groupby implementation. + """ + + _attributes = Rolling._attributes + BaseWindowGroupby._attributes + + def _get_window_indexer(self) -> GroupbyIndexer: + """ + Return an indexer class that will compute the window start and end bounds + + Returns + ------- + GroupbyIndexer + """ + rolling_indexer: type[BaseIndexer] + indexer_kwargs: dict[str, Any] | None = None + index_array = self._index_array + if isinstance(self.window, BaseIndexer): + rolling_indexer = type(self.window) + indexer_kwargs = self.window.__dict__.copy() + assert isinstance(indexer_kwargs, dict) # for mypy + # We'll be using the index of each group later + indexer_kwargs.pop("index_array", None) + window = self.window + elif self._win_freq_i8 is not None: + rolling_indexer = VariableWindowIndexer + # error: Incompatible types in assignment (expression has type + # "int", variable has type "BaseIndexer") + window = self._win_freq_i8 # type: ignore[assignment] + else: + rolling_indexer = FixedWindowIndexer + window = self.window + window_indexer = GroupbyIndexer( + index_array=index_array, + window_size=window, + groupby_indices=self._grouper.indices, + window_indexer=rolling_indexer, + indexer_kwargs=indexer_kwargs, + ) + return window_indexer + + def _validate_datetimelike_monotonic(self): + """ + Validate that each group in self._on is monotonic + """ + # GH 46061 + if self._on.hasnans: + self._raise_monotonic_error("values must not have NaT") + for group_indices in self._grouper.indices.values(): + group_on = self._on.take(group_indices) + if not ( + group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing + ): + on = "index" if self.on is None else self.on + raise ValueError( + f"Each group within {on} must be monotonic. " + f"Sort the values in {on} first." + )
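# ---------------------------------------------------------------------------
# Editor's illustrative sketch -- NOT part of the vendored pandas diff above.
# A minimal demonstration, assuming pandas >= 2.0 and numpy are installed, of
# two behaviours defined in this hunk:
#   1. Rolling.sem() is implemented as std() scaled by sqrt(count - ddof), so
#      the same result can be reproduced from the public rolling API.
#   2. RollingGroupby._validate_datetimelike_monotonic() only requires the
#      ``on`` values to be sorted *within* each group, and raises the
#      ValueError constructed at the end of that method otherwise.
# The DataFrame below and its column names ("g", "ts", "x") are hypothetical,
# chosen here purely for illustration.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd

# 1. sem == std / sqrt(count - ddof); values match the sem() docstring example
#    (0.707107 for every complete window of size 2).
s = pd.Series([0, 1, 2, 3])
roll = s.rolling(2, min_periods=1)
direct = roll.sem(ddof=1)
manual = roll.std(ddof=1) / np.sqrt(roll.count() - 1)
pd.testing.assert_series_equal(direct, manual)

# 2. A non-monotonic ``on`` column inside a single group ("b" below) triggers
#    the per-group monotonicity check added for GH 46061.
df = pd.DataFrame(
    {
        "g": ["a", "a", "b", "b"],
        "ts": pd.to_datetime(
            ["2023-01-01", "2023-01-02", "2023-01-04", "2023-01-03"]
        ),
        "x": [1.0, 2.0, 3.0, 4.0],
    }
)
try:
    df.groupby("g").rolling("2D", on="ts").sum()
except ValueError as err:
    # Expected: "Each group within ts must be monotonic. Sort the values in ts first."
    print(err)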